/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
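
/*
 * The four helpers above stash a dirty flag in bit 0 of each struct page
 * pointer held in buffer->pages.  A struct page pointer is always at least
 * word-aligned, so bit 0 of a real pointer is guaranteed to be zero and is
 * free to use as a tag.  A minimal sketch of the round trip (illustrative
 * only; sync_page() is a placeholder, not a function in this file):
 *
 *	ion_buffer_page_dirty(&buffer->pages[i]);		// set bit 0
 *	if (ion_buffer_page_is_dirty(buffer->pages[i]))
 *		sync_page(ion_buffer_page(buffer->pages[i]));	// untagged ptr
 *	ion_buffer_page_clean(&buffer->pages[i]);		// clear bit 0
 */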

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	if (buffer->sg_table == NULL) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	table = buffer->sg_table;
	buffer->dev = dev;
	buffer->size = len;

	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping, that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * When a buffer is removed from a handle and is not in any other
	 * handles, copy the taskcomm and the pid of the process it's being
	 * removed from into the buffer.  At this point there will be no way
	 * to track what processes this buffer is being used by; it only
	 * exists as a dma_buf file descriptor.  The taskcomm and pid can
	 * provide a debug hint as to where this fd is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(
			struct ion_handle *handle)
{
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}
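
/*
 * The check above guards against reference count overflow: if enough gets
 * could wrap the kref back around to zero, a later put would free the handle
 * while other references were still live (a use-after-free).  Returning
 * -EOVERFLOW instead refuses the reference once the count is saturated.
 */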

int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until allocate
	 * has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free_nolock(struct ion_client *client,
		     struct ion_handle *handle)
{
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
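
/*
 * Typical in-kernel use of the client API above -- a minimal sketch, not
 * taken from this file; it assumes a client obtained from
 * ion_client_create() and a heap at ID 0 whose ops implement map_kernel
 * (both assumptions):
 *
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	handle = ion_alloc(client, SZ_4K, 0, 1 << 0, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	vaddr = ion_map_kernel(client, handle);
 *	if (!IS_ERR(vaddr)) {
 *		memset(vaddr, 0, SZ_4K);
 *		ion_unmap_kernel(client, handle);
 *	}
 *	ion_free(client, handle);
 */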

static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%pK dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}
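
/*
 * For example (illustrative): if two clients named "camera" already exist
 * with serials 0 and 1, the next "camera" client gets serial 2 and hence
 * the display name "camera-2".
 */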

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
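
/*
 * How the pieces above cooperate for cached, fault-mapped buffers: instead
 * of mapping eagerly, ion_mmap() below installs ion_vma_ops, so the first
 * userspace touch of each page lands in ion_vm_fault(), which tags the page
 * dirty (bit 0 of the pages[] entry) and inserts the pfn.
 * ion_buffer_sync_for_device() then syncs only the dirty pages, and
 * zap_page_range() tears the mappings down so the next access faults -- and
 * is tracked -- again.
 */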

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);

	return 0;
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
					   struct ion_handle *handle,
					   bool lock_client)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	if (lock_client)
		mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		if (lock_client)
			mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	if (lock_client)
		mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		ion_buffer_put(buffer);

	return dmabuf;
}

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	return __ion_share_dma_buf(client, handle, true);
}
EXPORT_SYMBOL(ion_share_dma_buf);

static int __ion_share_dma_buf_fd(struct ion_client *client,
				  struct ion_handle *handle, bool lock_client)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = __ion_share_dma_buf(client, handle, lock_client);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, true);
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

int ion_share_dma_buf_fd_nolock(struct ion_client *client,
				struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, false);
}

struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		handle = ion_handle_get_check_overflow(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
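
/*
 * Round trip through the export/import paths above -- a minimal sketch with
 * illustrative names (client_a and client_b are assumptions, not code in
 * this file):
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle);
 *	// ...pass fd to another context, e.g. over binder or a socket...
 *	struct ion_handle *imported = ion_import_dma_buf_fd(client_b, fd);
 *	// both handles now reference the same underlying ion_buffer
 */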
1154
Laura Abbottb1fa6d82016-09-07 11:49:58 -07001155int ion_sync_for_device(struct ion_client *client, int fd)
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001156{
1157 struct dma_buf *dmabuf;
1158 struct ion_buffer *buffer;
1159
1160 dmabuf = dma_buf_get(fd);
Colin Cross9e907652013-12-13 14:24:49 -08001161 if (IS_ERR(dmabuf))
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001162 return PTR_ERR(dmabuf);
1163
1164 /* if this memory came from ion */
1165 if (dmabuf->ops != &dma_buf_ops) {
1166 pr_err("%s: can not sync dmabuf from another exporter\n",
1167 __func__);
1168 dma_buf_put(dmabuf);
1169 return -EINVAL;
1170 }
1171 buffer = dmabuf->priv;
Rebecca Schultz Zavin856661d2013-12-13 14:24:05 -08001172
1173 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1174 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001175 dma_buf_put(dmabuf);
1176 return 0;
1177}
1178
Laura Abbott02b23802016-09-07 11:49:59 -07001179int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
1180{
1181 struct ion_device *dev = client->dev;
1182 struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
1183 int ret = -EINVAL, cnt = 0, max_cnt;
1184 struct ion_heap *heap;
1185 struct ion_heap_data hdata;
1186
1187 memset(&hdata, 0, sizeof(hdata));
1188
1189 down_read(&dev->lock);
1190 if (!buffer) {
1191 query->cnt = dev->heap_cnt;
1192 ret = 0;
1193 goto out;
1194 }
1195
1196 if (query->cnt <= 0)
1197 goto out;
1198
1199 max_cnt = query->cnt;
1200
1201 plist_for_each_entry(heap, &dev->heaps, node) {
1202 strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
1203 hdata.name[sizeof(hdata.name) - 1] = '\0';
1204 hdata.type = heap->type;
1205 hdata.heap_id = heap->id;
1206
Dan Carpentercf559022016-10-13 15:55:08 +03001207 if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
1208 ret = -EFAULT;
1209 goto out;
1210 }
Laura Abbott02b23802016-09-07 11:49:59 -07001211
1212 cnt++;
1213 if (cnt >= max_cnt)
1214 break;
1215 }
1216
1217	query->cnt = cnt;
	ret = 0;
1218out:
1219 up_read(&dev->lock);
1220 return ret;
1221}
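/*
 * Userspace-side sketch (illustrative) of the intended two-call
 * pattern for ION_IOC_HEAP_QUERY: a sizing call with heaps == 0,
 * then a second call to fill the array. "ion_fd" is an assumed open
 * fd on /dev/ion; error handling is elided.
 *
 *	struct ion_heap_query query = { 0 };
 *	struct ion_heap_data *heaps;
 *
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);
 *	heaps = calloc(query.cnt, sizeof(*heaps));
 *	query.heaps = (__u64)(uintptr_t)heaps;
 *	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);
 */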
1222
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001223static int ion_release(struct inode *inode, struct file *file)
1224{
1225 struct ion_client *client = file->private_data;
1226
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001227 ion_client_destroy(client);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001228 return 0;
1229}
1230
1231static int ion_open(struct inode *inode, struct file *file)
1232{
1233 struct miscdevice *miscdev = file->private_data;
1234 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1235 struct ion_client *client;
Laura Abbott483ed032014-02-17 13:58:35 -08001236 char debug_name[64];
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001237
Laura Abbott483ed032014-02-17 13:58:35 -08001238 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1239 client = ion_client_create(dev, debug_name);
Colin Cross9e907652013-12-13 14:24:49 -08001240 if (IS_ERR(client))
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001241 return PTR_ERR(client);
1242 file->private_data = client;
1243
1244 return 0;
1245}
1246
1247static const struct file_operations ion_fops = {
1248 .owner = THIS_MODULE,
1249 .open = ion_open,
1250 .release = ion_release,
1251 .unlocked_ioctl = ion_ioctl,
Rom Lemarchand827c8492013-12-13 14:24:55 -08001252 .compat_ioctl = compat_ion_ioctl,
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001253};
1254
1255static size_t ion_debug_heap_total(struct ion_client *client,
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001256 unsigned int id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001257{
1258 size_t size = 0;
1259 struct rb_node *n;
1260
1261 mutex_lock(&client->lock);
1262 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1263 struct ion_handle *handle = rb_entry(n,
1264 struct ion_handle,
1265 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001266 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001267 size += handle->buffer->size;
1268 }
1269 mutex_unlock(&client->lock);
1270 return size;
1271}
1272
1273static int ion_debug_heap_show(struct seq_file *s, void *unused)
1274{
1275 struct ion_heap *heap = s->private;
1276 struct ion_device *dev = heap->dev;
1277 struct rb_node *n;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001278 size_t total_size = 0;
1279 size_t total_orphaned_size = 0;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001280
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001281 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Iulia Manda164ad862014-03-11 20:12:29 +02001282 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001283
Neil Zhang948c4db2016-01-26 17:39:06 +08001284 mutex_lock(&debugfs_mutex);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001285 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001286 struct ion_client *client = rb_entry(n, struct ion_client,
1287 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001288 size_t size = ion_debug_heap_total(client, heap->id);
Seunghun Lee10f62862014-05-01 01:30:23 +09001289
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001290 if (!size)
1291 continue;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001292 if (client->task) {
1293 char task_comm[TASK_COMM_LEN];
1294
1295 get_task_comm(task_comm, client->task);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001296 seq_printf(s, "%16s %16u %16zu\n", task_comm,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001297 client->pid, size);
1298 } else {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001299 seq_printf(s, "%16s %16u %16zu\n", client->name,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001300 client->pid, size);
1301 }
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001302 }
Neil Zhang948c4db2016-01-26 17:39:06 +08001303 mutex_unlock(&debugfs_mutex);
1304
Iulia Manda164ad862014-03-11 20:12:29 +02001305 seq_puts(s, "----------------------------------------------------\n");
1306 seq_puts(s, "orphaned allocations (info is from last known client):\n");
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001307 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001308 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1309 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1310 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001311 if (buffer->heap->id != heap->id)
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001312 continue;
1313 total_size += buffer->size;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001314 if (!buffer->handle_count) {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001315 seq_printf(s, "%16s %16u %16zu %d %d\n",
Colin Crosse61fc912013-12-13 19:26:14 -08001316 buffer->task_comm, buffer->pid,
1317 buffer->size, buffer->kmap_cnt,
Benjamin Gaignard092c3542013-12-13 14:24:22 -08001318 atomic_read(&buffer->ref.refcount));
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001319 total_orphaned_size += buffer->size;
1320 }
1321 }
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001322 mutex_unlock(&dev->buffer_lock);
Iulia Manda164ad862014-03-11 20:12:29 +02001323 seq_puts(s, "----------------------------------------------------\n");
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001324 seq_printf(s, "%16s %16zu\n", "total orphaned",
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001325 total_orphaned_size);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001326 seq_printf(s, "%16s %16zu\n", "total ", total_size);
Colin Cross2540c732013-12-13 14:24:47 -08001327 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001328 seq_printf(s, "%16s %16zu\n", "deferred free",
Johanna Abrahamsson121ca0c2016-08-22 12:16:58 +02001329 heap->free_list_size);
Iulia Manda164ad862014-03-11 20:12:29 +02001330 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001331
1332 if (heap->debug_show)
1333 heap->debug_show(heap, s, unused);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001334
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001335 return 0;
1336}
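/*
 * The per-heap debugfs file produced above looks roughly like this
 * (client names and sizes are illustrative):
 *
 *	          client              pid             size
 *	----------------------------------------------------
 *	        allocator              271           811008
 *	----------------------------------------------------
 *	orphaned allocations (info is from last known client):
 *	----------------------------------------------------
 *	  total orphaned                0
 *	           total            811008
 *	----------------------------------------------------
 */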
1337
1338static int ion_debug_heap_open(struct inode *inode, struct file *file)
1339{
1340 return single_open(file, ion_debug_heap_show, inode->i_private);
1341}
1342
1343static const struct file_operations debug_heap_fops = {
1344 .open = ion_debug_heap_open,
1345 .read = seq_read,
1346 .llseek = seq_lseek,
1347 .release = single_release,
1348};
1349
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001350static int debug_shrink_set(void *data, u64 val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001351{
John Stultze1d855b2013-12-13 19:26:33 -08001352 struct ion_heap *heap = data;
1353 struct shrink_control sc;
1354 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001355
Derek Yerger3b0ae7b2016-03-11 17:31:18 -05001356 sc.gfp_mask = GFP_HIGHUSER;
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001357 sc.nr_to_scan = val;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001358
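	/* a write of 0 means "scan everything the shrinker can free" */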
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001359 if (!val) {
1360 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1361 sc.nr_to_scan = objs;
1362 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001363
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001364 heap->shrinker.scan_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08001365 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001366}
1367
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001368static int debug_shrink_get(void *data, u64 *val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001369{
John Stultze1d855b2013-12-13 19:26:33 -08001370 struct ion_heap *heap = data;
1371 struct shrink_control sc;
1372 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001373
Derek Yerger3b0ae7b2016-03-11 17:31:18 -05001374 sc.gfp_mask = GFP_HIGHUSER;
John Stultze1d855b2013-12-13 19:26:33 -08001375 sc.nr_to_scan = 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001376
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001377 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08001378 *val = objs;
1379 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001380}
1381
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001382DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
John Stultze1d855b2013-12-13 19:26:33 -08001383 debug_shrink_set, "%llu\n");
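/*
 * Behavior sketch of the per-heap "<name>_shrink" debugfs file that
 * these hooks back (created in ion_device_add_heap() below); the
 * path assumes the default debugfs mount and an example heap named
 * "system":
 *
 *	# read how many objects the heap's shrinker reports freeable
 *	cat /sys/kernel/debug/ion/heaps/system_shrink
 *	# write N to scan N objects; 0 drops everything freeable
 *	echo 0 > /sys/kernel/debug/ion/heaps/system_shrink
 */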
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001384
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001385void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1386{
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001387 struct dentry *debug_file;
1388
Laura Abbottf82ad602016-08-08 09:52:56 -07001389 if (!heap->ops->allocate || !heap->ops->free)
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -08001390	pr_err("%s: cannot add heap with invalid ops struct.\n",
1391 __func__);
1392
Mitchel Humpherys95e53dd2015-01-08 17:24:27 -08001393 spin_lock_init(&heap->free_lock);
1394 heap->free_list_size = 0;
1395
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001396 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1397 ion_heap_init_deferred_free(heap);
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001398
Colin Crossb9daf0b2014-02-17 13:58:38 -08001399 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1400 ion_heap_init_shrinker(heap);
1401
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001402 heap->dev = dev;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001403 down_write(&dev->lock);
Sriram Raghunathan7e416172015-09-22 22:35:51 +05301404 /*
1405	 * Use negative heap->id as the plist priority so that, when the
1406	 * heap list is traversed later, higher id numbers are attempted first.
1407 */
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08001408 plist_node_init(&heap->node, -heap->id);
1409 plist_add(&heap->node, &dev->heaps);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001410 debug_file = debugfs_create_file(heap->name, 0664,
Johanna Abrahamsson121ca0c2016-08-22 12:16:58 +02001411 dev->heaps_debug_root, heap,
1412 &debug_heap_fops);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001413
1414 if (!debug_file) {
1415 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09001416
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001417 path = dentry_path(dev->heaps_debug_root, buf, 256);
1418 pr_err("Failed to create heap debugfs at %s/%s\n",
Johanna Abrahamsson121ca0c2016-08-22 12:16:58 +02001419 path, heap->name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001420 }
1421
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001422 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001423 char debug_name[64];
1424
1425 snprintf(debug_name, 64, "%s_shrink", heap->name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001426 debug_file = debugfs_create_file(
1427 debug_name, 0644, dev->heaps_debug_root, heap,
1428 &debug_shrink_fops);
1429 if (!debug_file) {
1430 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09001431
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001432 path = dentry_path(dev->heaps_debug_root, buf, 256);
1433 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
Johanna Abrahamsson121ca0c2016-08-22 12:16:58 +02001434 path, debug_name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001435 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001436 }
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001437
Laura Abbott02b23802016-09-07 11:49:59 -07001438 dev->heap_cnt++;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001439 up_write(&dev->lock);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001440}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04001441EXPORT_SYMBOL(ion_device_add_heap);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001442
1443struct ion_device *ion_device_create(long (*custom_ioctl)
1444 (struct ion_client *client,
1445 unsigned int cmd,
1446 unsigned long arg))
1447{
1448 struct ion_device *idev;
1449 int ret;
1450
Ben Marsh411059f2016-03-28 19:26:19 +02001451 idev = kzalloc(sizeof(*idev), GFP_KERNEL);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001452 if (!idev)
1453 return ERR_PTR(-ENOMEM);
1454
1455 idev->dev.minor = MISC_DYNAMIC_MINOR;
1456 idev->dev.name = "ion";
1457 idev->dev.fops = &ion_fops;
1458 idev->dev.parent = NULL;
1459 ret = misc_register(&idev->dev);
1460 if (ret) {
1461 pr_err("ion: failed to register misc device.\n");
Shailendra Verma283d9302015-05-19 20:29:00 +05301462 kfree(idev);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001463 return ERR_PTR(ret);
1464 }
1465
1466 idev->debug_root = debugfs_create_dir("ion", NULL);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001467 if (!idev->debug_root) {
1468 pr_err("ion: failed to create debugfs root directory.\n");
1469 goto debugfs_done;
1470 }
1471 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1472 if (!idev->heaps_debug_root) {
1473 pr_err("ion: failed to create debugfs heaps directory.\n");
1474 goto debugfs_done;
1475 }
1476 idev->clients_debug_root = debugfs_create_dir("clients",
1477 idev->debug_root);
1478 if (!idev->clients_debug_root)
1479 pr_err("ion: failed to create debugfs clients directory.\n");
1480
1481debugfs_done:
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001482
1483 idev->custom_ioctl = custom_ioctl;
1484 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001485 mutex_init(&idev->buffer_lock);
1486 init_rwsem(&idev->lock);
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08001487 plist_head_init(&idev->heaps);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001488 idev->clients = RB_ROOT;
Neil Zhang948c4db2016-01-26 17:39:06 +08001489 ion_root_client = &idev->clients;
1490 mutex_init(&debugfs_mutex);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001491 return idev;
1492}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04001493EXPORT_SYMBOL(ion_device_create);
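/*
 * Registration sketch (illustrative, not from this driver): a
 * platform driver wiring up an ion device with a single heap.
 * "my_heap_data" is an assumed struct ion_platform_heap, and
 * ion_heap_create() is the generic helper from ion_heap.c; a NULL
 * custom_ioctl is taken to mean "no ION_IOC_CUSTOM support".
 *
 *	struct ion_device *idev;
 *	struct ion_heap *heap;
 *
 *	idev = ion_device_create(NULL);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *
 *	heap = ion_heap_create(&my_heap_data);
 *	if (IS_ERR(heap)) {
 *		ion_device_destroy(idev);
 *		return PTR_ERR(heap);
 *	}
 *	ion_device_add_heap(idev, heap);
 */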
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001494
1495void ion_device_destroy(struct ion_device *dev)
1496{
1497 misc_deregister(&dev->dev);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001498 debugfs_remove_recursive(dev->debug_root);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001499 /* XXX need to free the heaps and clients ? */
1500 kfree(dev);
1501}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04001502EXPORT_SYMBOL(ion_device_destroy);