/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
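
/*
 * The page-tracking helpers above pack a "dirty" bit into bit 0 of
 * each struct page pointer stored in buffer->pages. struct page
 * pointers are at least word-aligned, so bit 0 is always free for the
 * tag; ion_buffer_page() must be used to strip the tag before the
 * pointer is dereferenced.
 */
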
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	if (buffer->sg_table == NULL) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	table = buffer->sg_table;
	buffer->dev = dev;
	buffer->size = len;

	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here. However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer. At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
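
/*
 * Usage sketch (illustrative, not part of this file): allocate a
 * page-sized cached buffer and release it again. "client" is assumed
 * to come from ion_client_create(); ION_HEAP_SYSTEM_MASK is assumed
 * to be the system-heap id mask from the ion uapi headers.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, PAGE_SIZE, 0, ION_HEAP_SYSTEM_MASK,
 *			   ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */
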
void ion_free_nolock(struct ion_client *client,
		     struct ion_handle *handle)
{
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
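
/*
 * Usage sketch (illustrative): map a buffer for CPU access around a
 * fill, assuming the backing heap implements map_kernel/unmap_kernel
 * and "len" is the allocation size that was passed to ion_alloc().
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */
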
static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
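
/*
 * Usage sketch (illustrative): an in-kernel user creates a client
 * against an ion device ("idev" is an assumed struct ion_device
 * pointer) and destroys it when done; destruction drops every handle
 * the client still holds.
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 */
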
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);

	return 0;
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
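
/*
 * Usage sketch (illustrative): export a buffer to userspace as a
 * dma-buf file descriptor, e.g. from an ioctl handler. The returned
 * fd holds its own reference to the buffer, so the memory stays alive
 * even after the originating handle is freed.
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 */
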
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
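
/*
 * Usage sketch (illustrative): re-import a dma-buf fd that ion
 * exported earlier. Importing an fd from a different exporter fails
 * with -EINVAL; on success the handle must eventually be released
 * with ion_free().
 *
 *	struct ion_handle *handle = ion_import_dma_buf_fd(client, fd);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */
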
int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
	struct ion_device *dev = client->dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
1199out:
1200 up_read(&dev->lock);
1201 return ret;
1202}
1203
static int ion_release(struct inode *inode, struct file *file)
{
        struct ion_client *client = file->private_data;

        pr_debug("%s: %d\n", __func__, __LINE__);
        ion_client_destroy(client);
        return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
        struct miscdevice *miscdev = file->private_data;
        struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
        struct ion_client *client;
        char debug_name[64];

        pr_debug("%s: %d\n", __func__, __LINE__);
        snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
        client = ion_client_create(dev, debug_name);
        if (IS_ERR(client))
                return PTR_ERR(client);
        file->private_data = client;

        return 0;
}

static const struct file_operations ion_fops = {
        .owner = THIS_MODULE,
        .open = ion_open,
        .release = ion_release,
        .unlocked_ioctl = ion_ioctl,
        .compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
                                   unsigned int id)
{
        size_t size = 0;
        struct rb_node *n;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n,
                                                     struct ion_handle,
                                                     node);
                if (handle->buffer->heap->id == id)
                        size += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
        return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
        struct ion_heap *heap = s->private;
        struct ion_device *dev = heap->dev;
        struct rb_node *n;
        size_t total_size = 0;
        size_t total_orphaned_size = 0;

        seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
        seq_puts(s, "----------------------------------------------------\n");

        mutex_lock(&debugfs_mutex);
        for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
                size_t size = ion_debug_heap_total(client, heap->id);

                if (!size)
                        continue;
                if (client->task) {
                        char task_comm[TASK_COMM_LEN];

                        get_task_comm(task_comm, client->task);
                        seq_printf(s, "%16s %16u %16zu\n", task_comm,
                                   client->pid, size);
                } else {
                        seq_printf(s, "%16s %16u %16zu\n", client->name,
                                   client->pid, size);
                }
        }
        mutex_unlock(&debugfs_mutex);

        seq_puts(s, "----------------------------------------------------\n");
        seq_puts(s, "orphaned allocations (info is from last known client):\n");
        mutex_lock(&dev->buffer_lock);
        for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
                struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
                                                     node);
                if (buffer->heap->id != heap->id)
                        continue;
                total_size += buffer->size;
                if (!buffer->handle_count) {
                        seq_printf(s, "%16s %16u %16zu %d %d\n",
                                   buffer->task_comm, buffer->pid,
                                   buffer->size, buffer->kmap_cnt,
                                   atomic_read(&buffer->ref.refcount));
                        total_orphaned_size += buffer->size;
                }
        }
        mutex_unlock(&dev->buffer_lock);
        seq_puts(s, "----------------------------------------------------\n");
        seq_printf(s, "%16s %16zu\n", "total orphaned",
                   total_orphaned_size);
        seq_printf(s, "%16s %16zu\n", "total ", total_size);
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                seq_printf(s, "%16s %16zu\n", "deferred free",
                           heap->free_list_size);
        seq_puts(s, "----------------------------------------------------\n");

        if (heap->debug_show)
                heap->debug_show(heap, s, unused);

        return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
        .open = ion_debug_heap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

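/*
 * Per-heap "<name>_shrink" debugfs attribute: reading it reports how many
 * objects the heap's shrinker currently considers freeable, writing N scans
 * (frees) up to N objects, and writing 0 scans everything the shrinker
 * counts as freeable.
 */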
static int debug_shrink_set(void *data, u64 val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = val;

        if (!val) {
                objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
                sc.nr_to_scan = objs;
        }

        heap->shrinker.scan_objects(&heap->shrinker, &sc);
        return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = 0;

        objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
        *val = objs;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
                        debug_shrink_set, "%llu\n");

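/*
 * ion_device_add_heap() registers @heap with @dev: it sets up deferred
 * freeing and the shrinker when the heap asks for them, inserts the heap
 * into the device's priority list (higher heap ids are attempted first,
 * hence the negative plist priority below), and exposes the per-heap
 * debugfs files served by the fops above.
 */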
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
        struct dentry *debug_file;

        if (!heap->ops->allocate || !heap->ops->free)
                pr_err("%s: cannot add heap with invalid ops struct.\n",
                       __func__);

        spin_lock_init(&heap->free_lock);
        heap->free_list_size = 0;

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_init_deferred_free(heap);

        if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
                ion_heap_init_shrinker(heap);

        heap->dev = dev;
        down_write(&dev->lock);
        /*
         * Use a negative heap->id as the plist priority so that, when the
         * list is traversed later, higher heap ids are attempted first.
         */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);
        debug_file = debugfs_create_file(heap->name, 0664,
                                         dev->heaps_debug_root, heap,
                                         &debug_heap_fops);

        if (!debug_file) {
                char buf[256], *path;

                path = dentry_path(dev->heaps_debug_root, buf, 256);
                pr_err("Failed to create heap debugfs at %s/%s\n",
                       path, heap->name);
        }

        if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
                char debug_name[64];

                snprintf(debug_name, 64, "%s_shrink", heap->name);
                debug_file = debugfs_create_file(
                        debug_name, 0644, dev->heaps_debug_root, heap,
                        &debug_shrink_fops);
                if (!debug_file) {
                        char buf[256], *path;

                        path = dentry_path(dev->heaps_debug_root, buf, 256);
                        pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
                               path, debug_name);
                }
        }

        dev->heap_cnt++;
        up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg))
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev)
                return ERR_PTR(-ENOMEM);

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ERR_PTR(ret);
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        if (!idev->debug_root) {
                pr_err("ion: failed to create debugfs root directory.\n");
                goto debugfs_done;
        }
        idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
        if (!idev->heaps_debug_root) {
                pr_err("ion: failed to create debugfs heaps directory.\n");
                goto debugfs_done;
        }
        idev->clients_debug_root = debugfs_create_dir("clients",
                                                      idev->debug_root);
        if (!idev->clients_debug_root)
                pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

        idev->custom_ioctl = custom_ioctl;
        idev->buffers = RB_ROOT;
        mutex_init(&idev->buffer_lock);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        idev->clients = RB_ROOT;
        ion_root_client = &idev->clients;
        mutex_init(&debugfs_mutex);
        return idev;
}
EXPORT_SYMBOL(ion_device_create);

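/*
 * Illustrative sketch, not part of the original driver: a minimal device
 * bring-up for a platform registering a single heap. example_ion_probe()
 * and its heap argument are hypothetical, and no custom ioctl handler is
 * installed (the ION_IOC_CUSTOM path then fails).
 */
static struct ion_device * __maybe_unused
example_ion_probe(struct ion_heap *heap)
{
        struct ion_device *idev = ion_device_create(NULL);

        if (IS_ERR(idev))
                return idev;

        ion_device_add_heap(idev, heap);
        return idev;
}
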
void ion_device_destroy(struct ion_device *dev)
{
        misc_deregister(&dev->dev);
        debugfs_remove_recursive(dev->debug_root);
        /* XXX need to free the heaps and clients ? */
        kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);