/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/msm_ion.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <trace/events/kmem.h>


#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients created against this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	/* Protects rb_tree */
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles and idr
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	unsigned int user_ref_count;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

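/*
 * The helpers below stash a per-page "dirty" flag in bit 0 of the entries of
 * buffer->pages.  A struct page pointer is always at least word aligned, so
 * bit 0 is otherwise unused: ion_buffer_page_dirty() sets the bit,
 * ion_buffer_page_clean() clears it, and ion_buffer_page() masks it off
 * before the pointer is dereferenced.
 */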
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(!table,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}

	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	atomic_long_add(len, &heap->total_allocated);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	atomic_long_sub(buffer->size, &buffer->heap->total_allocated);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	msm_dma_buf_freed(buffer);

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	if (buffer->handle_count == 0)
		atomic_long_add(buffer->size, &buffer->heap->total_handles);

	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
		atomic_long_sub(buffer->size, &buffer->heap->total_handles);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(struct ion_handle *handle)
{
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret;

	ret = kref_put(&handle->ref, ion_handle_destroy);

	return ret;
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

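/*
 * In addition to the handle's kref, user_ref_count tracks how many
 * references userspace holds on the handle.  The helpers below keep the two
 * in sync: taking the first user reference grabs one kref and dropping the
 * last user reference releases it again.
 */
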
/* Must hold the client lock */
static void user_ion_handle_get(struct ion_handle *handle)
{
	if (handle->user_ref_count++ == 0)
		kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *user_ion_handle_get_check_overflow(
	struct ion_handle *handle)
{
	if (handle->user_ref_count + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	user_ion_handle_get(handle);
	return handle;
}

/*
 * Passes a kref to the user ref count.
 * We know we're holding a kref to the object before and
 * after this call, so no need to reverify handle.
 */
static struct ion_handle *pass_to_user(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	struct ion_handle *ret;

	mutex_lock(&client->lock);
	ret = user_ion_handle_get_check_overflow(handle);
	ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);
	return ret;
}

/* Must hold the client lock */
static int user_ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret = 0;

	if (--handle->user_ref_count == 0)
		ret = ion_handle_put_nolock(handle);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

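/*
 * Common allocation path.  When @grab_handle is true an extra reference is
 * taken on the handle before it is added to the client, so the caller can
 * keep using the handle (for example to hand it to userspace via
 * pass_to_user()) without racing against a concurrent free.
 */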
static struct ion_handle *__ion_alloc(
		struct ion_client *client, size_t len,
		size_t align, unsigned int heap_id_mask,
		unsigned int flags, bool grab_handle)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_id_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_id_mask, flags);
		if (!IS_ERR(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
						heap_id_mask, flags,
						PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left;
			int ret_value;

			len_left = MAX_DBG_STR_LEN - dbg_str_idx - 1;
			ret_value = snprintf(&dbg_str[dbg_str_idx],
					     len_left, "%s ", heap->name);

			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
			}
		}
	}
	up_read(&dev->lock);

	if (!buffer) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags,
					    PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n",
			 len, align, dbg_str, client->name);
		return ERR_CAST(buffer);
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	if (grab_handle)
		ion_handle_get(handle);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
}
EXPORT_SYMBOL(ion_alloc);

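/*
 * Illustrative sketch of a typical in-kernel user of the allocation API
 * exported above (error handling omitted; the heap mask and sizes are only
 * examples, and "idev" stands for the ion device obtained at probe time):
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle = ion_alloc(client, SZ_64K, SZ_4K,
 *					      ION_HEAP_SYSTEM_MASK,
 *					      ION_FLAG_CACHED);
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	... use vaddr ...
 *
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */
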
static void ion_free_nolock(struct ion_client *client,
			    struct ion_handle *handle)
{
	bool valid_handle;

	WARN_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

static void user_ion_free_nolock(struct ion_client *client,
				 struct ion_handle *handle)
{
	bool valid_handle;

	WARN_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	if (handle->user_ref_count == 0) {
		WARN(1, "%s: User does not have access!\n", __func__);
		return;
	}
	user_ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static struct rb_root *ion_root_client;

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
		   "heap_name", "size_in_bytes", "handle refcount",
		   "buffer");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		seq_printf(s, "%16.16s: %16zx : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		seq_puts(s, "\n");
	}
	mutex_unlock(&client->lock);
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);

	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	down_write(&dev->lock);
	rb_erase(&client->node, &dev->clients);
	up_write(&dev->lock);

	/* After this completes, there are no more references to client */
	debugfs_remove_recursive(client->debug_root);

	mutex_lock(&client->lock);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_unlock(&client->lock);

	idr_destroy(&client->idr);
	if (client->task)
		put_task_struct(client->task);
	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			 unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			size_t *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

/**
 * ion_sg_table - get an sg_table for the buffer
 *
 * NOTE: most likely you should NOT be using this API.
 * You should be using Ion as a DMA Buf exporter and using
 * the sg_table returned by dma_buf_map_attachment.
 */
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

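/*
 * Build an sg_table describing a physically contiguous region of
 * @total_size bytes starting at @buffer_base, split into @chunk_size sized
 * entries with the dma address of each entry filled in up front.
 */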
struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					     size_t chunk_size,
					     size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;

		sg_dma_address(sg) = addr;
		sg->length = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

static struct sg_table *ion_dupe_sg_table(struct sg_table *orig_table)
{
	int ret, i;
	struct scatterlist *sg, *sg_orig;
	struct sg_table *table;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return NULL;

	ret = sg_alloc_table(table, orig_table->nents, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return NULL;
	}

	sg_orig = orig_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(sg, sg_orig, sizeof(*sg));
		sg_orig = sg_next(sg_orig);
	}
	return table;
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;
	struct sg_table *table;

	table = ion_dupe_sg_table(buffer->sg_table);
	if (!table)
		return NULL;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	sg_free_table(table);
	kfree(table);
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	WARN_ONCE(!dev, "A device is required for dma_sync\n");

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

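/*
 * For buffers whose user mappings are faulted in page by page, sync only
 * the pages that have been marked dirty since the last call, then zap the
 * user mappings so the next access faults again and re-marks its page.
 */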
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %pK\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %pK\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001302static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001303{
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001304 struct ion_buffer *buffer = dmabuf->priv;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001305 int ret = 0;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001306
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001307 if (!buffer->heap->ops->map_user) {
Iulia Manda7287bb52014-03-11 20:10:37 +02001308 pr_err("%s: this heap does not define a method for mapping to userspace\n",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001309 __func__);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001310 return -EINVAL;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001311 }
1312
Rebecca Schultz Zavin13ba7802013-12-13 14:24:06 -08001313 if (ion_buffer_fault_user_mappings(buffer)) {
Colin Cross462be0c62013-12-13 19:26:24 -08001314 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1315 VM_DONTDUMP;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001316 vma->vm_private_data = buffer;
1317 vma->vm_ops = &ion_vma_ops;
Patrick Dalyeeeb9402016-11-01 20:54:41 -07001318 vma->vm_flags |= VM_MIXEDMAP;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001319 ion_vm_open(vma);
Rebecca Schultz Zavin856661d2013-12-13 14:24:05 -08001320 return 0;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001321 }
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001322
Rebecca Schultz Zavin856661d2013-12-13 14:24:05 -08001323 if (!(buffer->flags & ION_FLAG_CACHED))
1324 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1325
1326 mutex_lock(&buffer->lock);
1327 /* now map it to userspace */
1328 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1329 mutex_unlock(&buffer->lock);
1330
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001331 if (ret)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001332 pr_err("%s: failure mapping buffer to userspace\n",
1333 __func__);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001334
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001335 return ret;
1336}
1337
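The fault-based path above populates cached buffers page by page, while the map_user path installs the whole range up front; either way userspace simply calls mmap() on the dma-buf fd. A minimal userspace sketch follows, assuming a buffer fd already obtained through the legacy ION UAPI (for example via ION_IOC_SHARE); map_ion_buffer is an illustrative helper name, not part of this driver.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Hypothetical helper: map an ION dma-buf fd and touch it from the CPU. */
static void *map_ion_buffer(int buf_fd, size_t len)
{
	void *vaddr;

	/* This mmap() reaches ion_mmap() through the dma-buf file ops. */
	vaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
	if (vaddr == MAP_FAILED) {
		perror("mmap ion buffer");
		return NULL;
	}

	memset(vaddr, 0, len);	/* CPU writes go through the new mapping */
	return vaddr;
}
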
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001338static void ion_dma_buf_release(struct dma_buf *dmabuf)
1339{
1340 struct ion_buffer *buffer = dmabuf->priv;
Seunghun Lee10f62862014-05-01 01:30:23 +09001341
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001342 ion_buffer_put(buffer);
1343}
1344
1345static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1346{
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001347 struct ion_buffer *buffer = dmabuf->priv;
Seunghun Lee10f62862014-05-01 01:30:23 +09001348
Greg Hackmann12edf532013-12-13 14:24:00 -08001349 return buffer->vaddr + offset * PAGE_SIZE;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001350}
1351
1352static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1353 void *ptr)
1354{
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001355}
1356
Tiago Vignatti831e9da2015-12-22 19:36:45 -02001357static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001358 enum dma_data_direction direction)
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001359{
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001360 struct ion_buffer *buffer = dmabuf->priv;
1361 void *vaddr;
1362
1363 if (!buffer->heap->ops->map_kernel) {
1364 pr_err("%s: map kernel is not implemented by this heap.\n",
1365 __func__);
1366 return -ENODEV;
1367 }
1368
1369 mutex_lock(&buffer->lock);
1370 vaddr = ion_buffer_kmap_get(buffer);
1371 mutex_unlock(&buffer->lock);
Sachin Kamatab0c0692014-01-27 12:17:05 +05301372 return PTR_ERR_OR_ZERO(vaddr);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001373}
1374
Chris Wilson18b862d2016-03-18 20:02:39 +00001375static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1376 enum dma_data_direction direction)
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001377{
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001378 struct ion_buffer *buffer = dmabuf->priv;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001379
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001380 mutex_lock(&buffer->lock);
1381 ion_buffer_kmap_put(buffer);
1382 mutex_unlock(&buffer->lock);
Chris Wilson18b862d2016-03-18 20:02:39 +00001383
1384 return 0;
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001385}
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001386
Colin Crossf63958d2013-12-13 19:26:28 -08001387static struct dma_buf_ops dma_buf_ops = {
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001388 .map_dma_buf = ion_map_dma_buf,
1389 .unmap_dma_buf = ion_unmap_dma_buf,
1390 .mmap = ion_mmap,
1391 .release = ion_dma_buf_release,
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001392 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1393 .end_cpu_access = ion_dma_buf_end_cpu_access,
1394 .kmap_atomic = ion_dma_buf_kmap,
1395 .kunmap_atomic = ion_dma_buf_kunmap,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001396 .kmap = ion_dma_buf_kmap,
1397 .kunmap = ion_dma_buf_kunmap,
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001398};
1399
Johan Mossberg22ba4322013-12-13 14:24:34 -08001400struct dma_buf *ion_share_dma_buf(struct ion_client *client,
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001401 struct ion_handle *handle)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001402{
Dmitry Kalinkin5605b182015-07-13 15:50:30 +03001403 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001404 struct ion_buffer *buffer;
1405 struct dma_buf *dmabuf;
1406 bool valid_handle;
Sumit Semwald8fbe342015-01-23 12:53:43 +05301407
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001408 mutex_lock(&client->lock);
1409 valid_handle = ion_handle_validate(client, handle);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001410 if (!valid_handle) {
Olav Haugana9bb0752013-12-13 14:23:54 -08001411 WARN(1, "%s: invalid handle passed to share.\n", __func__);
Colin Cross83271f62013-12-13 14:24:59 -08001412 mutex_unlock(&client->lock);
Johan Mossberg22ba4322013-12-13 14:24:34 -08001413 return ERR_PTR(-EINVAL);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001414 }
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001415 buffer = handle->buffer;
1416 ion_buffer_get(buffer);
Colin Cross83271f62013-12-13 14:24:59 -08001417 mutex_unlock(&client->lock);
1418
Sumit Semwal72449cb2015-02-21 09:00:17 +05301419 exp_info.ops = &dma_buf_ops;
1420 exp_info.size = buffer->size;
1421 exp_info.flags = O_RDWR;
1422 exp_info.priv = buffer;
1423
Sumit Semwald8fbe342015-01-23 12:53:43 +05301424 dmabuf = dma_buf_export(&exp_info);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001425 if (IS_ERR(dmabuf)) {
1426 ion_buffer_put(buffer);
Johan Mossberg22ba4322013-12-13 14:24:34 -08001427 return dmabuf;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001428 }
Johan Mossberg22ba4322013-12-13 14:24:34 -08001429
1430 return dmabuf;
1431}
1432EXPORT_SYMBOL(ion_share_dma_buf);
1433
1434int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1435{
1436 struct dma_buf *dmabuf;
1437 int fd;
1438
1439 dmabuf = ion_share_dma_buf(client, handle);
1440 if (IS_ERR(dmabuf))
1441 return PTR_ERR(dmabuf);
1442
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001443 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
Laura Abbott55808b82013-12-13 14:23:57 -08001444 if (fd < 0)
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001445 dma_buf_put(dmabuf);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001446 return fd;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001447}
Johan Mossberg22ba4322013-12-13 14:24:34 -08001448EXPORT_SYMBOL(ion_share_dma_buf_fd);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001449
Rohit kumar9f903812016-01-12 09:31:46 +05301450struct ion_handle *ion_import_dma_buf(struct ion_client *client,
1451 struct dma_buf *dmabuf)
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001452{
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001453 struct ion_buffer *buffer;
1454 struct ion_handle *handle;
Colin Cross47b40452013-12-13 14:24:50 -08001455 int ret;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001456
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001457 /* if this memory came from ion */
1458
1459 if (dmabuf->ops != &dma_buf_ops) {
1460		pr_err("%s: cannot import dmabuf from another exporter\n",
1461 __func__);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001462 return ERR_PTR(-EINVAL);
1463 }
1464 buffer = dmabuf->priv;
1465
1466 mutex_lock(&client->lock);
1467 /* if a handle exists for this buffer just take a reference to it */
1468 handle = ion_handle_lookup(client, buffer);
Colin Cross9e907652013-12-13 14:24:49 -08001469 if (!IS_ERR(handle)) {
Daniel Rosenberg20746c12016-12-05 16:28:28 -08001470 handle = ion_handle_get_check_overflow(handle);
Colin Cross83271f62013-12-13 14:24:59 -08001471 mutex_unlock(&client->lock);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001472 goto end;
1473 }
Colin Cross83271f62013-12-13 14:24:59 -08001474
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001475 handle = ion_handle_create(client, buffer);
Shawn Lin6fa92e22015-09-09 15:41:52 +08001476 if (IS_ERR(handle)) {
1477 mutex_unlock(&client->lock);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001478 goto end;
Shawn Lin6fa92e22015-09-09 15:41:52 +08001479 }
Colin Cross83271f62013-12-13 14:24:59 -08001480
Colin Cross47b40452013-12-13 14:24:50 -08001481 ret = ion_handle_add(client, handle);
Colin Cross83271f62013-12-13 14:24:59 -08001482 mutex_unlock(&client->lock);
Colin Cross47b40452013-12-13 14:24:50 -08001483 if (ret) {
1484 ion_handle_put(handle);
1485 handle = ERR_PTR(ret);
1486 }
Colin Cross83271f62013-12-13 14:24:59 -08001487
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001488end:
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001489 return handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001490}
Olav Hauganee4c8aa2013-12-13 14:23:55 -08001491EXPORT_SYMBOL(ion_import_dma_buf);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001492
Rohit kumar9f903812016-01-12 09:31:46 +05301493struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
1494{
1495 struct dma_buf *dmabuf;
1496 struct ion_handle *handle;
1497
1498 dmabuf = dma_buf_get(fd);
1499 if (IS_ERR(dmabuf))
1500 return ERR_CAST(dmabuf);
1501
1502 handle = ion_import_dma_buf(client, dmabuf);
1503 dma_buf_put(dmabuf);
1504 return handle;
1505}
1506EXPORT_SYMBOL(ion_import_dma_buf_fd);
1507
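For in-kernel users the two helpers above are symmetric: ion_share_dma_buf_fd() turns a handle into an fd that can be handed to userspace, and ion_import_dma_buf_fd() resolves an fd back into a handle for the calling client. Below is a hedged sketch of the import side; it assumes an ion_client created elsewhere with ion_client_create(), and example_import_user_fd is an illustrative name.

/*
 * Sketch only: a driver that is handed a dma-buf fd (e.g. from an ioctl)
 * imports it, uses the buffer, and then drops the reference again.
 */
static int example_import_user_fd(struct ion_client *client, int fd)
{
	struct ion_handle *handle;

	handle = ion_import_dma_buf_fd(client, fd);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... access the buffer, e.g. via ion_map_kernel()/ion_unmap_kernel() ... */

	ion_free(client, handle);	/* release the reference taken on import */
	return 0;
}
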
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001508static int ion_sync_for_device(struct ion_client *client, int fd)
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001509{
1510 struct dma_buf *dmabuf;
1511 struct ion_buffer *buffer;
1512
1513 dmabuf = dma_buf_get(fd);
Colin Cross9e907652013-12-13 14:24:49 -08001514 if (IS_ERR(dmabuf))
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001515 return PTR_ERR(dmabuf);
1516
1517 /* if this memory came from ion */
1518 if (dmabuf->ops != &dma_buf_ops) {
1519		pr_err("%s: cannot sync dmabuf from another exporter\n",
1520 __func__);
1521 dma_buf_put(dmabuf);
1522 return -EINVAL;
1523 }
1524 buffer = dmabuf->priv;
Rebecca Schultz Zavin856661d2013-12-13 14:24:05 -08001525
Liam Mark53261412017-12-04 10:58:55 -08001526 if (!is_buffer_hlos_assigned(buffer)) {
1527 pr_err("%s: cannot sync a secure dmabuf\n", __func__);
1528 dma_buf_put(dmabuf);
1529 return -EINVAL;
1530 }
Rebecca Schultz Zavin856661d2013-12-13 14:24:05 -08001531 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1532 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001533 dma_buf_put(dmabuf);
1534 return 0;
1535}
1536
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001537/* fix up the cases where the ioctl direction bits are incorrect */
1538static unsigned int ion_ioctl_dir(unsigned int cmd)
Laura Abbott02b23802016-09-07 11:49:59 -07001539{
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001540 switch (cmd) {
1541 case ION_IOC_SYNC:
1542 case ION_IOC_FREE:
1543 case ION_IOC_CUSTOM:
1544 return _IOC_WRITE;
1545 default:
1546 return _IOC_DIR(cmd);
1547 }
1548}
1549
1550static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1551{
1552 struct ion_client *client = filp->private_data;
Laura Abbott02b23802016-09-07 11:49:59 -07001553 struct ion_device *dev = client->dev;
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001554 struct ion_handle *cleanup_handle = NULL;
1555 int ret = 0;
1556 unsigned int dir;
Laura Abbott02b23802016-09-07 11:49:59 -07001557
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001558 union {
1559 struct ion_fd_data fd;
1560 struct ion_allocation_data allocation;
1561 struct ion_handle_data handle;
1562 struct ion_custom_data custom;
1563 } data;
Laura Abbott02b23802016-09-07 11:49:59 -07001564
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001565 dir = ion_ioctl_dir(cmd);
1566
1567 if (_IOC_SIZE(cmd) > sizeof(data))
1568 return -EINVAL;
1569
1570 if (dir & _IOC_WRITE)
1571 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1572 return -EFAULT;
1573
1574 switch (cmd) {
1575 case ION_IOC_ALLOC:
1576 {
1577 struct ion_handle *handle;
1578
Daniel Rosenbergc30d45a2016-11-02 17:43:51 -07001579 handle = __ion_alloc(client, data.allocation.len,
1580 data.allocation.align,
1581 data.allocation.heap_id_mask,
1582 data.allocation.flags, true);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001583 if (IS_ERR(handle))
1584 return PTR_ERR(handle);
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001585 pass_to_user(handle);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001586 data.allocation.handle = handle->id;
1587
1588 cleanup_handle = handle;
1589 break;
1590 }
1591 case ION_IOC_FREE:
1592 {
1593 struct ion_handle *handle;
1594
1595 mutex_lock(&client->lock);
1596 handle = ion_handle_get_by_id_nolock(client,
1597 data.handle.handle);
1598 if (IS_ERR(handle)) {
1599 mutex_unlock(&client->lock);
1600 return PTR_ERR(handle);
1601 }
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001602 user_ion_free_nolock(client, handle);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001603 ion_handle_put_nolock(handle);
1604 mutex_unlock(&client->lock);
1605 break;
1606 }
1607 case ION_IOC_SHARE:
1608 case ION_IOC_MAP:
1609 {
1610 struct ion_handle *handle;
1611
1612 handle = ion_handle_get_by_id(client, data.handle.handle);
1613 if (IS_ERR(handle))
1614 return PTR_ERR(handle);
1615 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1616 ion_handle_put(handle);
1617 if (data.fd.fd < 0)
1618 ret = data.fd.fd;
1619 break;
1620 }
1621 case ION_IOC_IMPORT:
1622 {
1623 struct ion_handle *handle;
1624
1625 handle = ion_import_dma_buf_fd(client, data.fd.fd);
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001626 if (IS_ERR(handle)) {
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001627 ret = PTR_ERR(handle);
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001628 } else {
1629 handle = pass_to_user(handle);
1630 if (IS_ERR(handle))
1631 ret = PTR_ERR(handle);
1632 else
1633 data.handle.handle = handle->id;
1634 }
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001635 break;
1636 }
1637 case ION_IOC_SYNC:
1638 {
1639 ret = ion_sync_for_device(client, data.fd.fd);
1640 break;
1641 }
1642 case ION_IOC_CUSTOM:
1643 {
1644 if (!dev->custom_ioctl)
1645 return -ENOTTY;
1646 ret = dev->custom_ioctl(client, data.custom.cmd,
1647 data.custom.arg);
1648 break;
1649 }
Patrick Dalyeeeb9402016-11-01 20:54:41 -07001650 case ION_IOC_CLEAN_CACHES:
1651 return client->dev->custom_ioctl(client,
1652 ION_IOC_CLEAN_CACHES, arg);
1653 case ION_IOC_INV_CACHES:
1654 return client->dev->custom_ioctl(client,
1655 ION_IOC_INV_CACHES, arg);
1656 case ION_IOC_CLEAN_INV_CACHES:
1657 return client->dev->custom_ioctl(client,
1658 ION_IOC_CLEAN_INV_CACHES, arg);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001659 default:
1660 return -ENOTTY;
Laura Abbott02b23802016-09-07 11:49:59 -07001661 }
1662
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001663 if (dir & _IOC_READ) {
1664 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
Daniel Rosenbergc30d45a2016-11-02 17:43:51 -07001665 if (cleanup_handle) {
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001666 mutex_lock(&client->lock);
1667 user_ion_free_nolock(client, cleanup_handle);
1668 ion_handle_put_nolock(cleanup_handle);
1669 mutex_unlock(&client->lock);
Daniel Rosenbergc30d45a2016-11-02 17:43:51 -07001670 }
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001671 return -EFAULT;
1672 }
Laura Abbott02b23802016-09-07 11:49:59 -07001673 }
Daniel Rosenbergc30d45a2016-11-02 17:43:51 -07001674 if (cleanup_handle)
1675 ion_handle_put(cleanup_handle);
Laura Abbott02b23802016-09-07 11:49:59 -07001676 return ret;
1677}
1678
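Putting the ioctl paths above together, the usual userspace sequence is ION_IOC_ALLOC to obtain a handle, ION_IOC_SHARE to turn it into a dma-buf fd, and ION_IOC_FREE once the fd exists (the fd holds its own buffer reference). The sketch below is hedged: it assumes the legacy staging UAPI structures from the ion.h header (whose install path may differ per tree), and the heap mask, flags and alloc_and_share name are illustrative only.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ion.h>		/* legacy staging UAPI; header path is an assumption */

static int alloc_and_share(size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 0,
		.heap_id_mask = ION_HEAP_SYSTEM_MASK,	/* illustrative heap choice */
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data share = { 0 };
	struct ion_handle_data free_arg = { 0 };
	int ion_fd, buf_fd = -1;

	ion_fd = open("/dev/ion", O_RDONLY);
	if (ion_fd < 0)
		return -1;

	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto out;

	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) == 0)
		buf_fd = share.fd;		/* dma-buf fd, usable with mmap() */

	free_arg.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_arg);	/* the fd keeps the buffer alive */
out:
	close(ion_fd);
	return buf_fd;
}
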
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001679static int ion_release(struct inode *inode, struct file *file)
1680{
1681 struct ion_client *client = file->private_data;
1682
1683 pr_debug("%s: %d\n", __func__, __LINE__);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001684 ion_client_destroy(client);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001685 return 0;
1686}
1687
1688static int ion_open(struct inode *inode, struct file *file)
1689{
1690 struct miscdevice *miscdev = file->private_data;
1691 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1692 struct ion_client *client;
Laura Abbott483ed032014-02-17 13:58:35 -08001693 char debug_name[64];
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001694
1695 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbott483ed032014-02-17 13:58:35 -08001696 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1697 client = ion_client_create(dev, debug_name);
Colin Cross9e907652013-12-13 14:24:49 -08001698 if (IS_ERR(client))
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001699 return PTR_ERR(client);
1700 file->private_data = client;
1701
1702 return 0;
1703}
1704
1705static const struct file_operations ion_fops = {
1706 .owner = THIS_MODULE,
1707 .open = ion_open,
1708 .release = ion_release,
1709 .unlocked_ioctl = ion_ioctl,
Rom Lemarchand827c8492013-12-13 14:24:55 -08001710 .compat_ioctl = compat_ion_ioctl,
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001711};
1712
1713static size_t ion_debug_heap_total(struct ion_client *client,
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001714 unsigned int id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001715{
1716 size_t size = 0;
1717 struct rb_node *n;
1718
1719 mutex_lock(&client->lock);
1720 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1721 struct ion_handle *handle = rb_entry(n,
1722 struct ion_handle,
1723 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001724 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001725 size += handle->buffer->size;
1726 }
1727 mutex_unlock(&client->lock);
1728 return size;
1729}
1730
Patrick Dalyeeeb9402016-11-01 20:54:41 -07001731/**
1732 * Create a mem_map of the heap.
1733 * @param s seq_file to log error message to.
1734 * @param heap The heap to create mem_map for.
1735 * @param mem_map The mem map to be created.
1736 */
1737void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1738 struct list_head *mem_map)
1739{
1740 struct ion_device *dev = heap->dev;
1741 struct rb_node *cnode;
1742 size_t size;
1743 struct ion_client *client;
1744
1745 if (!heap->ops->phys)
1746 return;
1747
1748 down_read(&dev->lock);
1749 for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
1750 struct rb_node *hnode;
1751
1752 client = rb_entry(cnode, struct ion_client, node);
1753
1754 mutex_lock(&client->lock);
1755 for (hnode = rb_first(&client->handles);
1756 hnode;
1757 hnode = rb_next(hnode)) {
1758 struct ion_handle *handle = rb_entry(
1759 hnode, struct ion_handle, node);
1760 if (handle->buffer->heap == heap) {
1761 struct mem_map_data *data =
1762 kzalloc(sizeof(*data), GFP_KERNEL);
1763 if (!data)
1764 goto inner_error;
1765 heap->ops->phys(heap, handle->buffer,
1766 &data->addr, &size);
1767 data->size = (unsigned long)size;
1768 data->addr_end = data->addr + data->size - 1;
1769 data->client_name = kstrdup(client->name,
1770 GFP_KERNEL);
1771 if (!data->client_name) {
1772 kfree(data);
1773 goto inner_error;
1774 }
1775 list_add(&data->node, mem_map);
1776 }
1777 }
1778 mutex_unlock(&client->lock);
1779 }
1780 up_read(&dev->lock);
1781 return;
1782
1783inner_error:
1784 seq_puts(s,
1785 "ERROR: out of memory. Part of memory map will not be logged\n");
1786 mutex_unlock(&client->lock);
1787 up_read(&dev->lock);
1788}
1789
1790/**
1791 * Free the memory allocated by ion_debug_mem_map_create
1792 * @param mem_map The mem map to free.
1793 */
1794static void ion_debug_mem_map_destroy(struct list_head *mem_map)
1795{
1796 if (mem_map) {
1797 struct mem_map_data *data, *tmp;
1798
1799 list_for_each_entry_safe(data, tmp, mem_map, node) {
1800 list_del(&data->node);
1801 kfree(data->client_name);
1802 kfree(data);
1803 }
1804 }
1805}
1806
1807static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
1808{
1809 struct mem_map_data *d1, *d2;
1810
1811 d1 = list_entry(a, struct mem_map_data, node);
1812 d2 = list_entry(b, struct mem_map_data, node);
1813	if (d1->addr == d2->addr)
1814		return d1->size > d2->size ? 1 : (d1->size < d2->size ? -1 : 0);
1815	return d1->addr > d2->addr ? 1 : (d1->addr < d2->addr ? -1 : 0);
1816}
1817
1818/**
1819 * Print heap debug information.
1820 * @param s seq_file to log message to.
1821 * @param heap pointer to heap that we will print debug information for.
1822 */
1823static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1824{
1825 if (heap->ops->print_debug) {
1826 struct list_head mem_map = LIST_HEAD_INIT(mem_map);
1827
1828 ion_debug_mem_map_create(s, heap, &mem_map);
1829 list_sort(NULL, &mem_map, mem_map_cmp);
1830 heap->ops->print_debug(heap, s, &mem_map);
1831 ion_debug_mem_map_destroy(&mem_map);
1832 }
1833}
1834
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001835static int ion_debug_heap_show(struct seq_file *s, void *unused)
1836{
1837 struct ion_heap *heap = s->private;
1838 struct ion_device *dev = heap->dev;
1839 struct rb_node *n;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001840 size_t total_size = 0;
1841 size_t total_orphaned_size = 0;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001842
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001843 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Iulia Manda164ad862014-03-11 20:12:29 +02001844 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001845
Patrick Daly60f0d9a2017-06-30 17:16:21 -07001846 down_read(&dev->lock);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001847 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001848 struct ion_client *client = rb_entry(n, struct ion_client,
1849 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001850 size_t size = ion_debug_heap_total(client, heap->id);
Seunghun Lee10f62862014-05-01 01:30:23 +09001851
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001852 if (!size)
1853 continue;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001854 if (client->task) {
1855 char task_comm[TASK_COMM_LEN];
1856
1857 get_task_comm(task_comm, client->task);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001858 seq_printf(s, "%16s %16u %16zu\n", task_comm,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001859 client->pid, size);
1860 } else {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001861 seq_printf(s, "%16s %16u %16zu\n", client->name,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001862 client->pid, size);
1863 }
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001864 }
Patrick Daly60f0d9a2017-06-30 17:16:21 -07001865 up_read(&dev->lock);
Neil Zhang948c4db2016-01-26 17:39:06 +08001866
Iulia Manda164ad862014-03-11 20:12:29 +02001867 seq_puts(s, "----------------------------------------------------\n");
1868 seq_puts(s, "orphaned allocations (info is from last known client):\n");
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001869 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001870 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1871 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1872 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001873 if (buffer->heap->id != heap->id)
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001874 continue;
1875 total_size += buffer->size;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001876 if (!buffer->handle_count) {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001877 seq_printf(s, "%16s %16u %16zu %d %d\n",
Colin Crosse61fc912013-12-13 19:26:14 -08001878 buffer->task_comm, buffer->pid,
1879 buffer->size, buffer->kmap_cnt,
Benjamin Gaignard092c3542013-12-13 14:24:22 -08001880 atomic_read(&buffer->ref.refcount));
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001881 total_orphaned_size += buffer->size;
1882 }
1883 }
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001884 mutex_unlock(&dev->buffer_lock);
Iulia Manda164ad862014-03-11 20:12:29 +02001885 seq_puts(s, "----------------------------------------------------\n");
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001886 seq_printf(s, "%16s %16zu\n", "total orphaned",
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001887 total_orphaned_size);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001888 seq_printf(s, "%16s %16zu\n", "total ", total_size);
Colin Cross2540c732013-12-13 14:24:47 -08001889 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001890 seq_printf(s, "%16s %16zu\n", "deferred free",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001891 heap->free_list_size);
Iulia Manda164ad862014-03-11 20:12:29 +02001892 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001893
1894 if (heap->debug_show)
1895 heap->debug_show(heap, s, unused);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001896
Patrick Dalyeeeb9402016-11-01 20:54:41 -07001897 ion_heap_print_debug(s, heap);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001898 return 0;
1899}
1900
1901static int ion_debug_heap_open(struct inode *inode, struct file *file)
1902{
1903 return single_open(file, ion_debug_heap_show, inode->i_private);
1904}
1905
1906static const struct file_operations debug_heap_fops = {
1907 .open = ion_debug_heap_open,
1908 .read = seq_read,
1909 .llseek = seq_lseek,
1910 .release = single_release,
1911};
1912
Laura Abbott29defcc2014-08-01 16:13:40 -07001913void show_ion_usage(struct ion_device *dev)
1914{
1915 struct ion_heap *heap;
1916
1917 if (!down_read_trylock(&dev->lock)) {
1918 pr_err("Ion output would deadlock, can't print debug information\n");
1919 return;
1920 }
1921
1922	pr_info("%16s %16s %16s\n", "Heap name", "Total heap size",
1923 "Total orphaned size");
1924 pr_info("---------------------------------\n");
1925 plist_for_each_entry(heap, &dev->heaps, node) {
Patrick Dalye4640062017-08-01 19:56:52 -07001926		pr_info("%16s 0x%16lx 0x%16lx\n",
1927 heap->name, atomic_long_read(&heap->total_allocated),
1928 atomic_long_read(&heap->total_allocated) -
1929 atomic_long_read(&heap->total_handles));
Laura Abbott29defcc2014-08-01 16:13:40 -07001930 if (heap->debug_show)
1931 heap->debug_show(heap, NULL, 0);
1932 }
1933 up_read(&dev->lock);
1934}
1935
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001936static int debug_shrink_set(void *data, u64 val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001937{
John Stultze1d855b2013-12-13 19:26:33 -08001938 struct ion_heap *heap = data;
1939 struct shrink_control sc;
1940 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001941
Derek Yerger3b0ae7b2016-03-11 17:31:18 -05001942 sc.gfp_mask = GFP_HIGHUSER;
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001943 sc.nr_to_scan = val;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001944
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001945 if (!val) {
1946 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1947 sc.nr_to_scan = objs;
1948 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001949
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001950 heap->shrinker.scan_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08001951 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001952}
1953
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001954static int debug_shrink_get(void *data, u64 *val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001955{
John Stultze1d855b2013-12-13 19:26:33 -08001956 struct ion_heap *heap = data;
1957 struct shrink_control sc;
1958 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001959
Derek Yerger3b0ae7b2016-03-11 17:31:18 -05001960 sc.gfp_mask = GFP_HIGHUSER;
John Stultze1d855b2013-12-13 19:26:33 -08001961 sc.nr_to_scan = 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001962
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001963 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08001964 *val = objs;
1965 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001966}
1967
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001968DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
John Stultze1d855b2013-12-13 19:26:33 -08001969 debug_shrink_set, "%llu\n");
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001970
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001971void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1972{
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001973 struct dentry *debug_file;
1974
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001975 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1976 !heap->ops->unmap_dma)
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -08001977		pr_err("%s: cannot add heap with invalid ops struct.\n",
1978 __func__);
1979
Mitchel Humpherys95e53dd2015-01-08 17:24:27 -08001980 spin_lock_init(&heap->free_lock);
1981 heap->free_list_size = 0;
1982
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001983 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1984 ion_heap_init_deferred_free(heap);
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001985
Colin Crossb9daf0b2014-02-17 13:58:38 -08001986 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1987 ion_heap_init_shrinker(heap);
1988
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001989 heap->dev = dev;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001990 down_write(&dev->lock);
Sriram Raghunathan7e416172015-09-22 22:35:51 +05301991 /*
1992 * use negative heap->id to reverse the priority -- when traversing
1993 * the list later attempt higher id numbers first
1994 */
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08001995 plist_node_init(&heap->node, -heap->id);
1996 plist_add(&heap->node, &dev->heaps);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001997 debug_file = debugfs_create_file(heap->name, 0664,
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001998 dev->heaps_debug_root, heap,
1999 &debug_heap_fops);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002000
2001 if (!debug_file) {
2002 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09002003
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002004 path = dentry_path(dev->heaps_debug_root, buf, 256);
2005 pr_err("Failed to create heap debugfs at %s/%s\n",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002006 path, heap->name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002007 }
2008
Gioh Kimaeb7fa72015-07-06 15:14:41 +09002009 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08002010 char debug_name[64];
2011
2012 snprintf(debug_name, 64, "%s_shrink", heap->name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002013 debug_file = debugfs_create_file(
2014 debug_name, 0644, dev->heaps_debug_root, heap,
2015 &debug_shrink_fops);
2016 if (!debug_file) {
2017 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09002018
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002019 path = dentry_path(dev->heaps_debug_root, buf, 256);
2020 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002021 path, debug_name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002022 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08002023 }
Gioh Kimaeb7fa72015-07-06 15:14:41 +09002024
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08002025 up_write(&dev->lock);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002026}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04002027EXPORT_SYMBOL(ion_device_add_heap);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002028
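A hedged sketch of the bring-up order implied by ion_device_add_heap(): the ion_device is created once, then each platform heap is constructed and registered. example_ion_probe, example_idev and pdata_heap are placeholder names; passing NULL to ion_device_create() simply means no custom ioctl handler.

/*
 * Sketch only: typical bring-up order for a platform driver.
 */
static struct ion_device *example_idev;

static int example_ion_probe(struct ion_platform_heap *pdata_heap)
{
	struct ion_heap *heap;

	example_idev = ion_device_create(NULL);
	if (IS_ERR(example_idev))
		return PTR_ERR(example_idev);

	heap = ion_heap_create(pdata_heap);	/* dispatches on pdata_heap->type */
	if (IS_ERR(heap)) {
		ion_device_destroy(example_idev);
		return PTR_ERR(heap);
	}

	ion_device_add_heap(example_idev, heap);	/* heap now visible to clients */
	return 0;
}
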
Laura Abbott29defcc2014-08-01 16:13:40 -07002029int ion_walk_heaps(struct ion_client *client, int heap_id,
2030 enum ion_heap_type type, void *data,
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002031 int (*f)(struct ion_heap *heap, void *data))
2032{
Laura Abbott29defcc2014-08-01 16:13:40 -07002033 int ret_val = 0;
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002034 struct ion_heap *heap;
2035 struct ion_device *dev = client->dev;
2036 /*
2037 * traverse the list of heaps available in this system
2038 * and find the heap that is specified.
2039 */
2040 down_write(&dev->lock);
2041 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott29defcc2014-08-01 16:13:40 -07002042 if (ION_HEAP(heap->id) != heap_id ||
2043 type != heap->type)
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002044 continue;
2045 ret_val = f(heap, data);
2046 break;
2047 }
2048 up_write(&dev->lock);
2049 return ret_val;
2050}
2051EXPORT_SYMBOL(ion_walk_heaps);
2052
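ion_walk_heaps() runs the caller's callback on the single heap whose ION_HEAP(id) bit and type both match, with the device lock held. A hedged sketch follows; the callback, the carveout type and the function names are illustrative, and heap_id_mask is assumed to already be in the ION_HEAP() bit form that the comparison above expects.

/* Sketch only: report the outstanding allocations of one matching heap. */
static int example_heap_usage_cb(struct ion_heap *heap, void *data)
{
	unsigned long *bytes = data;

	*bytes += atomic_long_read(&heap->total_allocated);
	return 0;
}

static unsigned long example_heap_usage(struct ion_client *client,
					int heap_id_mask)
{
	unsigned long bytes = 0;

	/* heap_id_mask must be the ION_HEAP() bit of the target heap */
	ion_walk_heaps(client, heap_id_mask, ION_HEAP_TYPE_CARVEOUT,
		       &bytes, example_heap_usage_cb);
	return bytes;
}
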
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002053struct ion_device *ion_device_create(long (*custom_ioctl)
2054 (struct ion_client *client,
2055 unsigned int cmd,
2056 unsigned long arg))
2057{
2058 struct ion_device *idev;
2059 int ret;
2060
Ben Marsh411059f2016-03-28 19:26:19 +02002061 idev = kzalloc(sizeof(*idev), GFP_KERNEL);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002062 if (!idev)
2063 return ERR_PTR(-ENOMEM);
2064
2065 idev->dev.minor = MISC_DYNAMIC_MINOR;
2066 idev->dev.name = "ion";
2067 idev->dev.fops = &ion_fops;
2068 idev->dev.parent = NULL;
2069 ret = misc_register(&idev->dev);
2070 if (ret) {
2071 pr_err("ion: failed to register misc device.\n");
Shailendra Verma283d9302015-05-19 20:29:00 +05302072 kfree(idev);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002073 return ERR_PTR(ret);
2074 }
2075
2076 idev->debug_root = debugfs_create_dir("ion", NULL);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002077 if (!idev->debug_root) {
2078 pr_err("ion: failed to create debugfs root directory.\n");
2079 goto debugfs_done;
2080 }
2081 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
2082 if (!idev->heaps_debug_root) {
2083 pr_err("ion: failed to create debugfs heaps directory.\n");
2084 goto debugfs_done;
2085 }
2086 idev->clients_debug_root = debugfs_create_dir("clients",
2087 idev->debug_root);
2088 if (!idev->clients_debug_root)
2089 pr_err("ion: failed to create debugfs clients directory.\n");
2090
2091debugfs_done:
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002092
2093 idev->custom_ioctl = custom_ioctl;
2094 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08002095 mutex_init(&idev->buffer_lock);
2096 init_rwsem(&idev->lock);
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08002097 plist_head_init(&idev->heaps);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08002098 idev->clients = RB_ROOT;
Neil Zhang948c4db2016-01-26 17:39:06 +08002099 ion_root_client = &idev->clients;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002100 return idev;
2101}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04002102EXPORT_SYMBOL(ion_device_create);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002103
2104void ion_device_destroy(struct ion_device *dev)
2105{
2106 misc_deregister(&dev->dev);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002107 debugfs_remove_recursive(dev->debug_root);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002108 /* XXX need to free the heaps and clients ? */
2109 kfree(dev);
2110}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04002111EXPORT_SYMBOL(ion_device_destroy);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002112
2113void __init ion_reserve(struct ion_platform_data *data)
2114{
2115 int i;
2116
2117 for (i = 0; i < data->nr; i++) {
2118 if (data->heaps[i].size == 0)
2119 continue;
2120
2121 if (data->heaps[i].base == 0) {
2122 phys_addr_t paddr;
2123
2124 paddr = memblock_alloc_base(data->heaps[i].size,
2125 data->heaps[i].align,
2126 MEMBLOCK_ALLOC_ANYWHERE);
2127 if (!paddr) {
2128 pr_err("%s: error allocating memblock for heap %d\n",
2129 __func__, i);
2130 continue;
2131 }
2132 data->heaps[i].base = paddr;
2133 } else {
2134 int ret = memblock_reserve(data->heaps[i].base,
2135 data->heaps[i].size);
2136 if (ret)
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002137 pr_err("memblock reserve of %zx@%pa failed\n",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002138 data->heaps[i].size,
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002139 &data->heaps[i].base);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002140 }
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002141 pr_info("%s: %s reserved base %pa size %zu\n", __func__,
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002142 data->heaps[i].name,
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002143 &data->heaps[i].base,
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002144 data->heaps[i].size);
2145 }
2146}