/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

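/*
 * The helpers below stash a dirty flag in bit 0 of each buffer->pages
 * entry; struct page pointers are always at least word-aligned, so the
 * low bit is free to use as a tag.
 */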
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	if (buffer->sg_table == NULL) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	table = buffer->sg_table;
	buffer->dev = dev;
	buffer->size = len;

	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here. However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

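/*
 * ion_buffer_destroy() does the actual teardown: it drops any leftover
 * kernel mapping, returns the memory to the heap and frees the
 * bookkeeping. It is called either directly from _ion_buffer_destroy()
 * below or, for ION_HEAP_FLAG_DEFER_FREE heaps, from the heap's
 * deferred-free path.
 */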
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer. At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

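/*
 * A handle owns exactly one reference on its buffer, taken in
 * ion_handle_create() above; ion_handle_destroy() drops it again, so
 * the buffer goes away once the last handle and any exported dma_buf
 * reference are gone.
 */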
static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

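/*
 * Handles are indexed two ways: by id in client->idr (for lookups on
 * behalf of userspace) and by buffer in the client->handles rbtree (so
 * importing an already-known buffer can find the existing handle).
 */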
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

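/*
 * Typical in-kernel usage of the alloc/free pair (an illustrative
 * sketch; the heap mask and flag constants are assumed to come from
 * the ion uapi header):
 *
 *	handle = ion_alloc(client, size, PAGE_SIZE,
 *			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */
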
void ion_free_nolock(struct ion_client *client,
		     struct ion_handle *handle)
{
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

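/*
 * Kernel mappings are refcounted at two levels: per handle
 * (handle->kmap_cnt) and per buffer (buffer->kmap_cnt). The heap's
 * map_kernel/unmap_kernel ops only run when the buffer's own count
 * moves between 0 and 1, so several handles can share one mapping.
 */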
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%pK dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

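/*
 * For cached buffers that rely on faulting (see
 * ion_buffer_fault_user_mappings()), userspace mappings are populated
 * lazily by the fault handler below. Faulted-in pages are marked
 * dirty; ion_buffer_sync_for_device() above flushes only the dirty
 * pages, then zaps the mappings so the next access faults, and
 * dirties, them again.
 */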
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);

	return 0;
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

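/*
 * Sharing round trip (an illustrative sketch): one client exports a
 * handle as an fd, a second client imports that fd as a handle of its
 * own:
 *
 *	fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	...pass fd to the other process or client...
 *	handle_b = ion_import_dma_buf_fd(client_b, fd);
 */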
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);

int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

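/*
 * ion_query_heaps() follows the usual two-call pattern: a first call
 * with query->heaps set to 0 only reports the number of heaps in
 * query->cnt, letting userspace size its buffer for the second call.
 */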
int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
	struct ion_device *dev = client->dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;	/* the table was copied out successfully */
out:
	up_read(&dev->lock);
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

1234static size_t ion_debug_heap_total(struct ion_client *client,
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001235 unsigned int id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001236{
1237 size_t size = 0;
1238 struct rb_node *n;
1239
1240 mutex_lock(&client->lock);
1241 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1242 struct ion_handle *handle = rb_entry(n,
1243 struct ion_handle,
1244 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001245 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001246 size += handle->buffer->size;
1247 }
1248 mutex_unlock(&client->lock);
1249 return size;
1250}
1251
1252static int ion_debug_heap_show(struct seq_file *s, void *unused)
1253{
1254 struct ion_heap *heap = s->private;
1255 struct ion_device *dev = heap->dev;
1256 struct rb_node *n;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001257 size_t total_size = 0;
1258 size_t total_orphaned_size = 0;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001259
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001260 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Iulia Manda164ad862014-03-11 20:12:29 +02001261 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001262
Neil Zhang948c4db2016-01-26 17:39:06 +08001263 mutex_lock(&debugfs_mutex);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001264 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001265 struct ion_client *client = rb_entry(n, struct ion_client,
1266 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001267 size_t size = ion_debug_heap_total(client, heap->id);
Seunghun Lee10f62862014-05-01 01:30:23 +09001268
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001269 if (!size)
1270 continue;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001271 if (client->task) {
1272 char task_comm[TASK_COMM_LEN];
1273
1274 get_task_comm(task_comm, client->task);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001275 seq_printf(s, "%16s %16u %16zu\n", task_comm,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001276 client->pid, size);
1277 } else {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001278 seq_printf(s, "%16s %16u %16zu\n", client->name,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001279 client->pid, size);
1280 }
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001281 }
Neil Zhang948c4db2016-01-26 17:39:06 +08001282 mutex_unlock(&debugfs_mutex);
1283
Iulia Manda164ad862014-03-11 20:12:29 +02001284 seq_puts(s, "----------------------------------------------------\n");
1285 seq_puts(s, "orphaned allocations (info is from last known client):\n");
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001286 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001287 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1288 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1289 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001290 if (buffer->heap->id != heap->id)
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001291 continue;
1292 total_size += buffer->size;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001293 if (!buffer->handle_count) {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001294 seq_printf(s, "%16s %16u %16zu %d %d\n",
Colin Crosse61fc912013-12-13 19:26:14 -08001295 buffer->task_comm, buffer->pid,
1296 buffer->size, buffer->kmap_cnt,
Benjamin Gaignard092c3542013-12-13 14:24:22 -08001297 atomic_read(&buffer->ref.refcount));
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001298 total_orphaned_size += buffer->size;
1299 }
1300 }
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001301 mutex_unlock(&dev->buffer_lock);
Iulia Manda164ad862014-03-11 20:12:29 +02001302 seq_puts(s, "----------------------------------------------------\n");
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001303 seq_printf(s, "%16s %16zu\n", "total orphaned",
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001304 total_orphaned_size);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001305 seq_printf(s, "%16s %16zu\n", "total ", total_size);
Colin Cross2540c732013-12-13 14:24:47 -08001306 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001307 seq_printf(s, "%16s %16zu\n", "deferred free",
Johanna Abrahamsson121ca0c2016-08-22 12:16:58 +02001308 heap->free_list_size);
Iulia Manda164ad862014-03-11 20:12:29 +02001309 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001310
1311 if (heap->debug_show)
1312 heap->debug_show(heap, s, unused);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001313
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001314 return 0;
1315}
1316
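/*
 * Each heap exposes one such file under /sys/kernel/debug/ion/heaps/
 * (named after the heap). Reading it yields, roughly (values below are
 * placeholders):
 *
 *	          client              pid             size
 *	----------------------------------------------------
 *	     <comm or name>          <pid>          <bytes>
 *	----------------------------------------------------
 *	orphaned allocations (info is from last known client):
 *	----------------------------------------------------
 *	  total orphaned           <bytes>
 *	  total                    <bytes>
 *	----------------------------------------------------
 */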
1317static int ion_debug_heap_open(struct inode *inode, struct file *file)
1318{
1319 return single_open(file, ion_debug_heap_show, inode->i_private);
1320}
1321
1322static const struct file_operations debug_heap_fops = {
1323 .open = ion_debug_heap_open,
1324 .read = seq_read,
1325 .llseek = seq_lseek,
1326 .release = single_release,
1327};
1328
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001329static int debug_shrink_set(void *data, u64 val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001330{
John Stultze1d855b2013-12-13 19:26:33 -08001331 struct ion_heap *heap = data;
1332 struct shrink_control sc;
1333 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001334
Derek Yerger3b0ae7b2016-03-11 17:31:18 -05001335 sc.gfp_mask = GFP_HIGHUSER;
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001336 sc.nr_to_scan = val;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001337
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001338 if (!val) {
1339 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1340 sc.nr_to_scan = objs;
1341 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001342
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001343 heap->shrinker.scan_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08001344 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001345}
1346
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001347static int debug_shrink_get(void *data, u64 *val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001348{
John Stultze1d855b2013-12-13 19:26:33 -08001349 struct ion_heap *heap = data;
1350 struct shrink_control sc;
1351 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001352
Derek Yerger3b0ae7b2016-03-11 17:31:18 -05001353 sc.gfp_mask = GFP_HIGHUSER;
John Stultze1d855b2013-12-13 19:26:33 -08001354 sc.nr_to_scan = 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001355
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001356 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08001357 *val = objs;
1358 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001359}
1360
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001361DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
John Stultze1d855b2013-12-13 19:26:33 -08001362 debug_shrink_set, "%llu\n");
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001363
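/*
 * The <heap>_shrink file this pair backs can be driven by hand, e.g.
 * for a hypothetical heap named "system" (paths assume debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/ion/heaps/system_shrink        # freeable objects
 *	echo 32 > /sys/kernel/debug/ion/heaps/system_shrink  # scan up to 32
 *	echo 0  > /sys/kernel/debug/ion/heaps/system_shrink  # drain everything
 */
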
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001364void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1365{
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001366 struct dentry *debug_file;
1367
Laura Abbottf82ad602016-08-08 09:52:56 -07001368 if (!heap->ops->allocate || !heap->ops->free)
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -08001369	pr_err("%s: cannot add heap with invalid ops struct.\n",
1370 __func__);
1371
Mitchel Humpherys95e53dd2015-01-08 17:24:27 -08001372 spin_lock_init(&heap->free_lock);
1373 heap->free_list_size = 0;
1374
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001375 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1376 ion_heap_init_deferred_free(heap);
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001377
Colin Crossb9daf0b2014-02-17 13:58:38 -08001378 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1379 ion_heap_init_shrinker(heap);
1380
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001381 heap->dev = dev;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001382 down_write(&dev->lock);
Sriram Raghunathan7e416172015-09-22 22:35:51 +05301383 /*
 1384	 * use a negative heap->id as the plist priority so that, when the
 1385	 * list is traversed later, heaps with higher id numbers are tried first
1386 */
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08001387 plist_node_init(&heap->node, -heap->id);
1388 plist_add(&heap->node, &dev->heaps);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001389 debug_file = debugfs_create_file(heap->name, 0664,
Johanna Abrahamsson121ca0c2016-08-22 12:16:58 +02001390 dev->heaps_debug_root, heap,
1391 &debug_heap_fops);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001392
1393 if (!debug_file) {
1394 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09001395
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001396 path = dentry_path(dev->heaps_debug_root, buf, 256);
1397 pr_err("Failed to create heap debugfs at %s/%s\n",
Johanna Abrahamsson121ca0c2016-08-22 12:16:58 +02001398 path, heap->name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001399 }
1400
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001401 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001402 char debug_name[64];
1403
1404 snprintf(debug_name, 64, "%s_shrink", heap->name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001405 debug_file = debugfs_create_file(
1406 debug_name, 0644, dev->heaps_debug_root, heap,
1407 &debug_shrink_fops);
1408 if (!debug_file) {
1409 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09001410
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001411 path = dentry_path(dev->heaps_debug_root, buf, 256);
1412 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
Johanna Abrahamsson121ca0c2016-08-22 12:16:58 +02001413 path, debug_name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001414 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001415 }
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001416
Laura Abbott02b23802016-09-07 11:49:59 -07001417 dev->heap_cnt++;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001418 up_write(&dev->lock);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001419}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04001420EXPORT_SYMBOL(ion_device_add_heap);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001421
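/*
 * A minimal sketch of the registration flow a platform driver would
 * use (the heap data below is hypothetical; ion_heap_create() is
 * declared in ion_priv.h):
 */
#if 0	/* sketch only */
static struct ion_platform_heap example_heap_data = {
	.type = ION_HEAP_TYPE_SYSTEM,
	.id = ION_HEAP_TYPE_SYSTEM,
	.name = "system",
};

static int example_register_heap(struct ion_device *idev)
{
	struct ion_heap *heap = ion_heap_create(&example_heap_data);

	if (IS_ERR(heap))
		return PTR_ERR(heap);
	ion_device_add_heap(idev, heap);
	return 0;
}
#endif
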
1422struct ion_device *ion_device_create(long (*custom_ioctl)
1423 (struct ion_client *client,
1424 unsigned int cmd,
1425 unsigned long arg))
1426{
1427 struct ion_device *idev;
1428 int ret;
1429
Ben Marsh411059f2016-03-28 19:26:19 +02001430 idev = kzalloc(sizeof(*idev), GFP_KERNEL);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001431 if (!idev)
1432 return ERR_PTR(-ENOMEM);
1433
1434 idev->dev.minor = MISC_DYNAMIC_MINOR;
1435 idev->dev.name = "ion";
1436 idev->dev.fops = &ion_fops;
1437 idev->dev.parent = NULL;
1438 ret = misc_register(&idev->dev);
1439 if (ret) {
1440 pr_err("ion: failed to register misc device.\n");
Shailendra Verma283d9302015-05-19 20:29:00 +05301441 kfree(idev);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001442 return ERR_PTR(ret);
1443 }
1444
1445 idev->debug_root = debugfs_create_dir("ion", NULL);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001446 if (!idev->debug_root) {
1447 pr_err("ion: failed to create debugfs root directory.\n");
1448 goto debugfs_done;
1449 }
1450 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1451 if (!idev->heaps_debug_root) {
1452 pr_err("ion: failed to create debugfs heaps directory.\n");
1453 goto debugfs_done;
1454 }
1455 idev->clients_debug_root = debugfs_create_dir("clients",
1456 idev->debug_root);
1457 if (!idev->clients_debug_root)
1458 pr_err("ion: failed to create debugfs clients directory.\n");
1459
1460debugfs_done:
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001461
1462 idev->custom_ioctl = custom_ioctl;
1463 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001464 mutex_init(&idev->buffer_lock);
1465 init_rwsem(&idev->lock);
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08001466 plist_head_init(&idev->heaps);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001467 idev->clients = RB_ROOT;
Neil Zhang948c4db2016-01-26 17:39:06 +08001468 ion_root_client = &idev->clients;
1469 mutex_init(&debugfs_mutex);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001470 return idev;
1471}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04001472EXPORT_SYMBOL(ion_device_create);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001473
1474void ion_device_destroy(struct ion_device *dev)
1475{
1476 misc_deregister(&dev->dev);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001477 debugfs_remove_recursive(dev->debug_root);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001478 /* XXX need to free the heaps and clients ? */
1479 kfree(dev);
1480}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04001481EXPORT_SYMBOL(ion_device_destroy);
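
/*
 * Typical lifecycle, as in the in-tree platform glue (e.g.
 * ion_dummy_driver.c); sketch only, error paths trimmed, no custom
 * ioctl handler installed:
 */
#if 0	/* sketch only */
static struct ion_device *example_idev;

static int __init example_ion_init(void)
{
	example_idev = ion_device_create(NULL);
	if (IS_ERR(example_idev))
		return PTR_ERR(example_idev);
	/* ... create heaps and ion_device_add_heap() each one ... */
	return 0;
}

static void __exit example_ion_exit(void)
{
	ion_device_destroy(example_idev);
}
#endif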