/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2018, 2020, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/msm_ion.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <trace/events/kmem.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:                the actual misc device
 * @buffers:            an rb tree of all the existing buffers
 * @buffer_lock:        lock protecting the tree of buffers
 * @lock:               rwsem protecting the tree of heaps and clients
 * @heaps:              list of all the heaps in the system
 * @user_clients:       list of all the clients created from userspace
 */
struct ion_device {
        struct miscdevice dev;
        struct rb_root buffers;
        /* Protects rb_tree */
        struct mutex buffer_lock;
        struct rw_semaphore lock;
        struct plist_head heaps;
        long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
                             unsigned long arg);
        struct rb_root clients;
        struct dentry *debug_root;
        struct dentry *heaps_debug_root;
        struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:               node in the tree of all clients
 * @dev:                backpointer to ion device
 * @handles:            an rb tree of all the handles in this client
 * @idr:                an idr space for allocating handle ids
 * @lock:               lock protecting the tree of handles and idr
 * @name:               used for debugging
 * @display_name:       used for debugging (unique version of @name)
 * @display_serial:     used for debugging (to make display_name unique)
 * @task:               used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
        struct rb_node node;
        struct ion_device *dev;
        struct rb_root handles;
        struct idr idr;
        struct mutex lock;
        char *name;
        char *display_name;
        int display_serial;
        struct task_struct *task;
        pid_t pid;
        struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:                reference count
 * @client:             back pointer to the client the buffer resides in
 * @buffer:             pointer to the buffer
 * @node:               node in the client's handle rbtree
 * @kmap_cnt:           count of times this client has mapped to kernel
 * @id:                 client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
        struct kref ref;
        unsigned int user_ref_count;
        struct ion_client *client;
        struct ion_buffer *buffer;
        struct rb_node node;
        unsigned int kmap_cnt;
        int id;
};

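/*
 * True when the buffer's pages should be faulted into userspace mappings
 * one page at a time: the buffer is cached and the client has not asked
 * for explicit cache maintenance via ION_FLAG_CACHED_NEEDS_SYNC.
 */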
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
        return (buffer->flags & ION_FLAG_CACHED) &&
                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
        return !!(buffer->flags & ION_FLAG_CACHED);
}

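/*
 * For fault-mapped buffers the low bit of each buffer->pages[] entry is
 * borrowed as a per-page dirty flag; the helpers below set, test, and
 * strip that tag bit.
 */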
static inline struct page *ion_buffer_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
        return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
        *page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
        *page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long align,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        kref_init(&buffer->ref);

        ret = heap->ops->allocate(heap, buffer, len, align, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, align,
                                          flags);
                if (ret)
                        goto err2;
        }

        buffer->dev = dev;
        buffer->size = len;
        INIT_LIST_HEAD(&buffer->vmas);

        table = heap->ops->map_dma(heap, buffer);
        if (WARN_ONCE(!table,
                      "heap->ops->map_dma should return ERR_PTR on error"))
                table = ERR_PTR(-EINVAL);
        if (IS_ERR(table)) {
                ret = -EINVAL;
                goto err1;
        }

        buffer->sg_table = table;
        if (ion_buffer_fault_user_mappings(buffer)) {
                int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
                struct scatterlist *sg;
                int i, j, k = 0;

                buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
                if (!buffer->pages) {
                        ret = -ENOMEM;
                        goto err;
                }

                for_each_sg(table->sgl, sg, table->nents, i) {
                        struct page *page = sg_page(sg);

                        for (j = 0; j < sg->length / PAGE_SIZE; j++)
                                buffer->pages[k++] = page++;
                }
        }

        mutex_init(&buffer->lock);
        /*
         * this will set up dma addresses for the sglist -- it is not
         * technically correct as per the dma api -- a specific
         * device isn't really taking ownership here. However, in practice on
         * our systems the only dma_address space is physical addresses.
         * Additionally, we can't afford the overhead of invalidating every
         * allocation via dma_map_sg. The implicit contract here is that
         * memory coming from the heaps is ready for dma, ie if it has a
         * cached mapping that mapping has been invalidated
         */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                sg_dma_address(sg) = sg_phys(sg);
                sg_dma_len(sg) = sg->length;
        }

        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        atomic_long_add(len, &heap->total_allocated);
        return buffer;

err:
        heap->ops->unmap_dma(heap, buffer);
err1:
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (buffer->kmap_cnt > 0) {
                pr_warn_once("%s: buffer still mapped in the kernel\n",
                             __func__);
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        }
        buffer->heap->ops->unmap_dma(buffer->heap, buffer);

        atomic_long_sub(buffer->size, &buffer->heap->total_allocated);
        buffer->heap->ops->free(buffer);
        vfree(buffer->pages);
        kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
        struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
        struct ion_heap *heap = buffer->heap;
        struct ion_device *dev = buffer->dev;

        msm_dma_buf_freed(buffer);

        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_freelist_add(heap, buffer);
        else
                ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
        kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
        return kref_put(&buffer->ref, _ion_buffer_destroy);
}

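/*
 * Count the handles referencing a buffer so that buffers which live on
 * only as dma-buf file descriptors can still be attributed to the task
 * that last held a handle (see ion_buffer_remove_from_handle below).
 */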
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
        mutex_lock(&buffer->lock);
        if (buffer->handle_count == 0)
                atomic_long_add(buffer->size, &buffer->heap->total_handles);

        buffer->handle_count++;
        mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
        /*
         * when a buffer is removed from a handle, if it is not in
         * any other handles, copy the taskcomm and the pid of the
         * process it's being removed from into the buffer. At this
         * point there will be no way to track what processes this buffer is
         * being used by, it only exists as a dma_buf file descriptor.
         * The taskcomm and pid can provide a debug hint as to where this fd
         * is in the system
         */
        mutex_lock(&buffer->lock);
        buffer->handle_count--;
        BUG_ON(buffer->handle_count < 0);
        if (!buffer->handle_count) {
                struct task_struct *task;

                task = current->group_leader;
                get_task_comm(buffer->task_comm, task);
                buffer->pid = task_pid_nr(task);
                atomic_long_sub(buffer->size, &buffer->heap->total_handles);
        }
        mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct ion_handle *handle;

        handle = kzalloc(sizeof(*handle), GFP_KERNEL);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        kref_init(&handle->ref);
        RB_CLEAR_NODE(&handle->node);
        handle->client = client;
        ion_buffer_get(buffer);
        ion_buffer_add_to_handle(buffer);
        handle->buffer = buffer;

        return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

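/*
 * Final teardown of a handle, run when its last kref is dropped with the
 * client lock held: release any leftover kernel mappings, remove the
 * handle from the client's idr and rbtree, and drop its buffer reference.
 */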
static void ion_handle_destroy(struct kref *kref)
{
        struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
        struct ion_client *client = handle->client;
        struct ion_buffer *buffer = handle->buffer;

        mutex_lock(&buffer->lock);
        while (handle->kmap_cnt)
                ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);

        idr_remove(&client->idr, handle->id);
        if (!RB_EMPTY_NODE(&handle->node))
                rb_erase(&handle->node, &client->handles);

        ion_buffer_remove_from_handle(buffer);
        ion_buffer_put(buffer);

        kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
        return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
        kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(struct ion_handle *handle)
{
        if (atomic_read(&handle->ref.refcount) + 1 == 0)
                return ERR_PTR(-EOVERFLOW);
        ion_handle_get(handle);
        return handle;
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
        int ret;

        ret = kref_put(&handle->ref, ion_handle_destroy);

        return ret;
}

int ion_handle_put(struct ion_handle *handle)
{
        struct ion_client *client = handle->client;
        int ret;

        mutex_lock(&client->lock);
        ret = ion_handle_put_nolock(handle);
        mutex_unlock(&client->lock);

        return ret;
}

/* Must hold the client lock */
static void user_ion_handle_get(struct ion_handle *handle)
{
        if (handle->user_ref_count++ == 0)
                kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *user_ion_handle_get_check_overflow(
                struct ion_handle *handle)
{
        if (handle->user_ref_count + 1 == 0)
                return ERR_PTR(-EOVERFLOW);
        user_ion_handle_get(handle);
        return handle;
}

/* passes a kref to the user ref count.
 * We know we're holding a kref to the object before and
 * after this call, so no need to reverify handle.
 */
static struct ion_handle *pass_to_user(struct ion_handle *handle)
{
        struct ion_client *client = handle->client;
        struct ion_handle *ret;

        mutex_lock(&client->lock);
        ret = user_ion_handle_get_check_overflow(handle);
        ion_handle_put_nolock(handle);
        mutex_unlock(&client->lock);
        return ret;
}

/* Must hold the client lock */
static int user_ion_handle_put_nolock(struct ion_handle *handle)
{
        int ret = 0;

        if (--handle->user_ref_count == 0)
                ret = ion_handle_put_nolock(handle);

        return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct rb_node *n = client->handles.rb_node;

        while (n) {
                struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

                if (buffer < entry->buffer)
                        n = n->rb_left;
                else if (buffer > entry->buffer)
                        n = n->rb_right;
                else
                        return entry;
        }
        return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
                                               int id)
{
        struct ion_handle *handle;

        handle = idr_find(&client->idr, id);
        if (handle)
                return ion_handle_get_check_overflow(handle);

        return ERR_PTR(-EINVAL);
}

bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
        WARN_ON(!mutex_is_locked(&client->lock));
        return idr_find(&client->idr, handle->id) == handle;
}

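/*
 * Allocate an id for the handle in the client's idr and link it into the
 * client's rbtree of handles, which is keyed by buffer address.
 */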
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
        int id;
        struct rb_node **p = &client->handles.rb_node;
        struct rb_node *parent = NULL;
        struct ion_handle *entry;

        id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
        if (id < 0)
                return id;

        handle->id = id;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_handle, node);

                if (handle->buffer < entry->buffer)
                        p = &(*p)->rb_left;
                else if (handle->buffer > entry->buffer)
                        p = &(*p)->rb_right;
                else
                        WARN(1, "%s: buffer already found.", __func__);
        }

        rb_link_node(&handle->node, parent, p);
        rb_insert_color(&handle->node, &client->handles);

        return 0;
}

static struct ion_handle *__ion_alloc(
                struct ion_client *client, size_t len,
                size_t align, unsigned int heap_id_mask,
                unsigned int flags, bool grab_handle)
{
        struct ion_handle *handle;
        struct ion_device *dev = client->dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
        int ret;
        const unsigned int MAX_DBG_STR_LEN = 64;
        char dbg_str[MAX_DBG_STR_LEN];
        unsigned int dbg_str_idx = 0;

        dbg_str[0] = '\0';

        /*
         * For now, we don't want to fault in pages individually since
         * clients are already doing manual cache maintenance. In
         * other words, the implicit caching infrastructure is in
         * place (in code) but should not be used.
         */
        flags |= ION_FLAG_CACHED_NEEDS_SYNC;

        pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
                 len, align, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order. If the heap type is supported by the client, and matches the
         * request of the caller allocate from it. Repeat until allocate has
         * succeeded or all heaps have been tried
         */
        len = PAGE_ALIGN(len);

        if (!len)
                return ERR_PTR(-EINVAL);

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                trace_ion_alloc_buffer_start(client->name, heap->name, len,
                                heap_id_mask, flags, client->pid, current->comm,
                                current->pid, (void *)buffer);
                buffer = ion_buffer_create(heap, dev, len, align, flags);
                trace_ion_alloc_buffer_end(client->name, heap->name, len,
                                heap_id_mask, flags, client->pid, current->comm,
                                current->pid, (void *)buffer);
                if (!IS_ERR(buffer))
                        break;

                trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
                                                heap_id_mask, flags,
                                                PTR_ERR(buffer));
                if (dbg_str_idx < MAX_DBG_STR_LEN) {
                        unsigned int len_left;
                        int ret_value;

                        len_left = MAX_DBG_STR_LEN - dbg_str_idx - 1;
                        ret_value = snprintf(&dbg_str[dbg_str_idx],
                                             len_left, "%s ", heap->name);

                        if (ret_value >= len_left) {
                                /* overflow */
                                dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
                                dbg_str_idx = MAX_DBG_STR_LEN;
                        } else if (ret_value >= 0) {
                                dbg_str_idx += ret_value;
                        } else {
                                /* error */
                                dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
                        }
                }
        }
        up_read(&dev->lock);

        if (!buffer) {
                trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
                                            heap_id_mask, flags, -ENODEV);
                return ERR_PTR(-ENODEV);
        }

        if (IS_ERR(buffer)) {
                trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
                                            heap_id_mask, flags,
                                            PTR_ERR(buffer));
                pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n",
                         len, align, dbg_str, client->name);
                return ERR_CAST(buffer);
        }

        handle = ion_handle_create(client, buffer);

        /*
         * ion_buffer_create will create a buffer with a ref_cnt of 1,
         * and ion_handle_create will take a second reference, drop one here
         */
        ion_buffer_put(buffer);

        if (IS_ERR(handle))
                return handle;

        mutex_lock(&client->lock);
        if (grab_handle)
                ion_handle_get(handle);
        ret = ion_handle_add(client, handle);
        mutex_unlock(&client->lock);
        if (ret) {
                ion_handle_put(handle);
                handle = ERR_PTR(ret);
        }

        return handle;
}

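/*
 * ion_alloc - allocate a buffer from one of the heaps in @heap_id_mask and
 * return a new handle to it in the calling client.
 *
 * Minimal usage sketch (illustrative only -- the device pointer, client
 * name, and system-heap mask used below are assumptions, not taken from
 * this file):
 *
 *	struct ion_client *client = ion_client_create(idev, "example");
 *	struct ion_handle *handle = ion_alloc(client, SZ_4K, 0,
 *					      ION_HEAP_SYSTEM_MASK,
 *					      ION_FLAG_CACHED);
 *	void *vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */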
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int heap_id_mask,
                             unsigned int flags)
{
        return __ion_alloc(client, len, align, heap_id_mask, flags, false);
}
EXPORT_SYMBOL(ion_alloc);

void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
        bool valid_handle;

        WARN_ON(client != handle->client);

        valid_handle = ion_handle_validate(client, handle);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
                return;
        }
        ion_handle_put_nolock(handle);
}

static void user_ion_free_nolock(struct ion_client *client,
                                 struct ion_handle *handle)
{
        bool valid_handle;

        WARN_ON(client != handle->client);

        valid_handle = ion_handle_validate(client, handle);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
                return;
        }
        if (handle->user_ref_count == 0) {
                WARN(1, "%s: User does not have access!\n", __func__);
                return;
        }
        trace_ion_free_buffer(client->name, client->pid, current->comm,
                              current->pid, (void *)handle->buffer,
                              handle->buffer->size);
        user_ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
        BUG_ON(client != handle->client);

        mutex_lock(&client->lock);
        ion_free_nolock(client, handle);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

static int __ion_phys(struct ion_client *client, struct ion_handle *handle,
                      ion_phys_addr_t *addr, size_t *len, bool lock_client)
{
        struct ion_buffer *buffer;
        int ret;

        if (lock_client)
                mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                if (lock_client)
                        mutex_unlock(&client->lock);
                return -EINVAL;
        }

        buffer = handle->buffer;

        if (!buffer->heap->ops->phys) {
                pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
                       __func__, buffer->heap->name, buffer->heap->type);
                if (lock_client)
                        mutex_unlock(&client->lock);
                return -ENODEV;
        }
        ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
        if (lock_client)
                mutex_unlock(&client->lock);
        return ret;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len)
{
        return __ion_phys(client, handle, addr, len, true);
}
EXPORT_SYMBOL(ion_phys);

int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
                    ion_phys_addr_t *addr, size_t *len)
{
        return __ion_phys(client, handle, addr, len, false);
}

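/*
 * Kernel mappings are refcounted at both the buffer and the handle level:
 * heap->ops->map_kernel() is only called on the buffer's 0 -> 1 kmap
 * transition and unmap_kernel() on the 1 -> 0 transition.
 */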
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (WARN_ONCE(vaddr == NULL,
                      "heap->ops->map_kernel should return ERR_PTR on error"))
                return ERR_PTR(-EINVAL);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;
        void *vaddr;

        if (handle->kmap_cnt) {
                handle->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = ion_buffer_kmap_get(buffer);
        if (IS_ERR(vaddr))
                return vaddr;
        handle->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;

        if (!handle->kmap_cnt) {
                WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
                return;
        }
        handle->kmap_cnt--;
        if (!handle->kmap_cnt)
                ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        void *vaddr;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_kernel.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;

        if (!handle->buffer->heap->ops->map_kernel) {
                pr_err("%s: map_kernel is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-ENODEV);
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_handle_kmap_get(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
        return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static struct rb_root *ion_root_client;

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
        struct ion_client *client = s->private;
        struct rb_node *n;

        seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
                   "heap_name", "size_in_bytes", "handle refcount",
                   "buffer");

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);

                seq_printf(s, "%16.16s: %16zx : %16d : %12pK",
                           handle->buffer->heap->name,
                           handle->buffer->size,
                           atomic_read(&handle->ref.refcount),
                           handle->buffer);

                seq_puts(s, "\n");
        }
        mutex_unlock(&client->lock);
        return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
        .open = ion_debug_client_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
                                 const unsigned char *name)
{
        int serial = -1;
        struct rb_node *node;

        for (node = rb_first(root); node; node = rb_next(node)) {
                struct ion_client *client = rb_entry(node, struct ion_client,
                                                     node);

                if (strcmp(client->name, name))
                        continue;
                serial = max(serial, client->display_serial);
        }
        return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
                                     const char *name)
{
        struct ion_client *client;
        struct task_struct *task;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ion_client *entry;
        pid_t pid;

        if (!name) {
                pr_err("%s: Name cannot be null\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        pid = task_pid_nr(current->group_leader);
        /*
         * don't bother to store task struct for kernel threads,
         * they can't be killed anyway
         */
        if (current->group_leader->flags & PF_KTHREAD) {
                put_task_struct(current->group_leader);
                task = NULL;
        } else {
                task = current->group_leader;
        }
        task_unlock(current->group_leader);

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                goto err_put_task_struct;

        client->dev = dev;
        client->handles = RB_ROOT;
        idr_init(&client->idr);
        mutex_init(&client->lock);

        client->task = task;
        client->pid = pid;
        client->name = kstrdup(name, GFP_KERNEL);
        if (!client->name)
                goto err_free_client;

        down_write(&dev->lock);
        client->display_serial = ion_get_client_serial(&dev->clients, name);
        client->display_name = kasprintf(
                GFP_KERNEL, "%s-%d", name, client->display_serial);
        if (!client->display_name) {
                up_write(&dev->lock);
                goto err_free_client_name;
        }
        p = &dev->clients.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_client, node);

                if (client < entry)
                        p = &(*p)->rb_left;
                else if (client > entry)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&client->node, parent, p);
        rb_insert_color(&client->node, &dev->clients);

        client->debug_root = debugfs_create_file(client->display_name, 0664,
                                                 dev->clients_debug_root,
                                                 client, &debug_client_fops);
        if (!client->debug_root) {
                char buf[256], *path;

                path = dentry_path(dev->clients_debug_root, buf, 256);
                pr_err("Failed to create client debugfs at %s/%s\n",
                       path, client->display_name);
        }

        up_write(&dev->lock);

        return client;

err_free_client_name:
        kfree(client->name);
err_free_client:
        kfree(client);
err_put_task_struct:
        if (task)
                put_task_struct(current->group_leader);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
        struct ion_device *dev = client->dev;
        struct rb_node *n;

        down_write(&dev->lock);
        rb_erase(&client->node, &dev->clients);
        up_write(&dev->lock);

        /* After this completes, there are no more references to client */
        debugfs_remove_recursive(client->debug_root);

        mutex_lock(&client->lock);
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                ion_handle_destroy(&handle->ref);
        }
        mutex_unlock(&client->lock);

        idr_destroy(&client->idr);
        if (client->task)
                put_task_struct(client->task);
        kfree(client->display_name);
        kfree(client->name);
        kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
                         unsigned long *flags)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to %s.\n",
                       __func__, __func__);
                mutex_unlock(&client->lock);
                return -EINVAL;
        }
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        *flags = buffer->flags;
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);

        return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
                        size_t *size)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to %s.\n",
                       __func__, __func__);
                mutex_unlock(&client->lock);
                return -EINVAL;
        }
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        *size = buffer->size;
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);

        return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

/**
 * ion_sg_table - get an sg_table for the buffer
 *
 * NOTE: most likely you should NOT be using this API.
 * You should be using Ion as a DMA Buf exporter and using
 * the sg_table returned by dma_buf_map_attachment.
 */
struct sg_table *ion_sg_table(struct ion_client *client,
                              struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct sg_table *table;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_dma.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }
        buffer = handle->buffer;
        table = buffer->sg_table;
        mutex_unlock(&client->lock);
        return table;
}
EXPORT_SYMBOL(ion_sg_table);

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
                                             size_t chunk_size,
                                             size_t total_size)
{
        struct sg_table *table;
        int i, n_chunks, ret;
        struct scatterlist *sg;

        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);

        n_chunks = DIV_ROUND_UP(total_size, chunk_size);
        pr_debug("creating sg_table with %d chunks\n", n_chunks);

        ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
        if (ret)
                goto err0;

        for_each_sg(table->sgl, sg, table->nents, i) {
                dma_addr_t addr = buffer_base + i * chunk_size;

                sg_dma_address(sg) = addr;
                sg->length = chunk_size;
        }

        return table;
err0:
        kfree(table);
        return ERR_PTR(ret);
}

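/*
 * Give each dma-buf attachment its own copy of the buffer's sg_table so
 * that a mapper cannot modify the original list shared by all users.
 */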
static struct sg_table *ion_dupe_sg_table(struct sg_table *orig_table)
{
        int ret, i;
        struct scatterlist *sg, *sg_orig;
        struct sg_table *table;

        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return NULL;

        ret = sg_alloc_table(table, orig_table->nents, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return NULL;
        }

        sg_orig = orig_table->sgl;
        for_each_sg(table->sgl, sg, table->nents, i) {
                memcpy(sg, sg_orig, sizeof(*sg));
                sg_orig = sg_next(sg_orig);
        }
        return table;
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct dma_buf *dmabuf = attachment->dmabuf;
        struct ion_buffer *buffer = dmabuf->priv;
        struct sg_table *table;

        table = ion_dupe_sg_table(buffer->sg_table);
        if (!table)
                return NULL;

        ion_buffer_sync_for_device(buffer, attachment->dev, direction);
        return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
        sg_free_table(table);
        kfree(table);
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
                               size_t size, enum dma_data_direction dir)
{
        struct scatterlist sg;

        WARN_ONCE(!dev, "A device is required for dma_sync\n");

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        /*
         * This is not correct - sg_dma_address needs a dma_addr_t that is valid
         * for the targeted device, but this works on the currently targeted
         * hardware.
         */
        sg_dma_address(&sg) = page_to_phys(page);
        dma_sync_sg_for_device(dev, &sg, 1, dir);
}

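/*
 * Every userspace VMA that maps a fault-mapped buffer is tracked on
 * buffer->vmas so ion_buffer_sync_for_device() can zap its page range and
 * force clean pages to be faulted back in after a device sync.
 */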
struct ion_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction dir)
{
        struct ion_vma_list *vma_list;
        int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        int i;

        if (!ion_buffer_fault_user_mappings(buffer))
                return;

        mutex_lock(&buffer->lock);
        for (i = 0; i < pages; i++) {
                struct page *page = buffer->pages[i];

                if (ion_buffer_page_is_dirty(page))
                        ion_pages_sync_for_device(dev, ion_buffer_page(page),
                                                  PAGE_SIZE, dir);

                ion_buffer_page_clean(buffer->pages + i);
        }
        list_for_each_entry(vma_list, &buffer->vmas, list) {
                struct vm_area_struct *vma = vma_list->vma;

                zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
                               NULL);
        }
        mutex_unlock(&buffer->lock);
}

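/*
 * Page fault handler for fault-mapped buffers: mark the faulting page
 * dirty for a later sync and insert its pfn into the faulting VMA.
 */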
Colin Crossf63958d2013-12-13 19:26:28 -08001248static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001249{
1250 struct ion_buffer *buffer = vma->vm_private_data;
Colin Cross462be0c62013-12-13 19:26:24 -08001251 unsigned long pfn;
Rebecca Schultz Zavinc13bd1c2013-12-13 14:24:45 -08001252 int ret;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001253
1254 mutex_lock(&buffer->lock);
Rebecca Schultz Zavinc13bd1c2013-12-13 14:24:45 -08001255 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
Rebecca Schultz Zavinc13bd1c2013-12-13 14:24:45 -08001256 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
Colin Cross462be0c62013-12-13 19:26:24 -08001257
1258 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1259 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001260 mutex_unlock(&buffer->lock);
Rebecca Schultz Zavinc13bd1c2013-12-13 14:24:45 -08001261 if (ret)
1262 return VM_FAULT_ERROR;
1263
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001264 return VM_FAULT_NOPAGE;
1265}
1266
1267static void ion_vm_open(struct vm_area_struct *vma)
1268{
1269 struct ion_buffer *buffer = vma->vm_private_data;
1270 struct ion_vma_list *vma_list;
1271
Ben Marsh411059f2016-03-28 19:26:19 +02001272 vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001273 if (!vma_list)
1274 return;
1275 vma_list->vma = vma;
1276 mutex_lock(&buffer->lock);
1277 list_add(&vma_list->list, &buffer->vmas);
1278 mutex_unlock(&buffer->lock);
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001279}
1280
1281static void ion_vm_close(struct vm_area_struct *vma)
1282{
1283 struct ion_buffer *buffer = vma->vm_private_data;
1284 struct ion_vma_list *vma_list, *tmp;
1285
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001286 mutex_lock(&buffer->lock);
1287 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1288 if (vma_list->vma != vma)
1289 continue;
1290 list_del(&vma_list->list);
1291 kfree(vma_list);
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001292 break;
1293 }
1294 mutex_unlock(&buffer->lock);
Patrick Dalyeeeb9402016-11-01 20:54:41 -07001295
1296 if (buffer->heap->ops->unmap_user)
1297 buffer->heap->ops->unmap_user(buffer->heap, buffer);
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001298}
1299
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07001300static const struct vm_operations_struct ion_vma_ops = {
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001301 .open = ion_vm_open,
1302 .close = ion_vm_close,
1303 .fault = ion_vm_fault,
1304};
1305
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001306static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001307{
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001308 struct ion_buffer *buffer = dmabuf->priv;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001309 int ret = 0;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001310
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001311 if (!buffer->heap->ops->map_user) {
Iulia Manda7287bb52014-03-11 20:10:37 +02001312 pr_err("%s: this heap does not define a method for mapping to userspace\n",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001313 __func__);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001314 return -EINVAL;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001315 }
1316
Rebecca Schultz Zavin13ba7802013-12-13 14:24:06 -08001317 if (ion_buffer_fault_user_mappings(buffer)) {
Colin Cross462be0c62013-12-13 19:26:24 -08001318 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1319 VM_DONTDUMP;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001320 vma->vm_private_data = buffer;
1321 vma->vm_ops = &ion_vma_ops;
Patrick Dalyeeeb9402016-11-01 20:54:41 -07001322 vma->vm_flags |= VM_MIXEDMAP;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001323 ion_vm_open(vma);
Rebecca Schultz Zavin856661d2013-12-13 14:24:05 -08001324 return 0;
Rebecca Schultz Zavin56a7c182013-12-13 14:23:50 -08001325 }
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001326
Rebecca Schultz Zavin856661d2013-12-13 14:24:05 -08001327 if (!(buffer->flags & ION_FLAG_CACHED))
1328 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1329
1330 mutex_lock(&buffer->lock);
1331 /* now map it to userspace */
1332 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1333 mutex_unlock(&buffer->lock);
1334
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001335 if (ret)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001336 pr_err("%s: failure mapping buffer to userspace\n",
1337 __func__);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001338
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001339 return ret;
1340}
1341
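/*
 * Illustrative userspace sketch (assumptions: "buf_fd" was obtained via
 * ION_IOC_SHARE / ion_share_dma_buf_fd() and "len" is the allocation size).
 * mmap() on the dma-buf fd lands in ion_mmap() above: cached buffers on
 * heaps that fault their pages take the deferred-fault path, uncached
 * buffers get a writecombine mapping.
 *
 *	void *ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, buf_fd, 0);
 *	if (ptr == MAP_FAILED)
 *		return -errno;
 *	memset(ptr, 0, len);
 *	munmap(ptr, len);
 */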
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001342static void ion_dma_buf_release(struct dma_buf *dmabuf)
1343{
1344 struct ion_buffer *buffer = dmabuf->priv;
Seunghun Lee10f62862014-05-01 01:30:23 +09001345
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001346 ion_buffer_put(buffer);
1347}
1348
1349static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1350{
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001351 struct ion_buffer *buffer = dmabuf->priv;
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001352 void *vaddr;
1353
1354 if (!buffer->heap->ops->map_kernel) {
1355 pr_err("%s: map kernel is not implemented by this heap.\n",
1356 __func__);
Hridya Valsaraju05030f52021-07-25 20:49:06 -07001357 return ERR_PTR(-ENOTTY);
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001358 }
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001359 mutex_lock(&buffer->lock);
1360 vaddr = ion_buffer_kmap_get(buffer);
1361 mutex_unlock(&buffer->lock);
Hridya Valsaraju05030f52021-07-25 20:49:06 -07001362
1363 if (IS_ERR(vaddr))
1364 return vaddr;
1365
1366 return vaddr + offset * PAGE_SIZE;
1367}
1368
1369static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1370 void *ptr)
1371{
1372 struct ion_buffer *buffer = dmabuf->priv;
1373
1374 if (buffer->heap->ops->map_kernel) {
1375 mutex_lock(&buffer->lock);
1376 ion_buffer_kmap_put(buffer);
1377 mutex_unlock(&buffer->lock);
1378 }
1380}
1381
1382static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1383 enum dma_data_direction direction)
1384{
1385 return 0;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001386}
1387
Chris Wilson18b862d2016-03-18 20:02:39 +00001388static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1389 enum dma_data_direction direction)
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001390{
Chris Wilson18b862d2016-03-18 20:02:39 +00001391 return 0;
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001392}
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001393
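/*
 * Illustrative sketch of in-kernel CPU access through the dma-buf contract
 * these callbacks implement.  begin/end_cpu_access are no-ops for ion here,
 * but a well-behaved importer still brackets its kmap with them; "dmabuf"
 * and the page index are placeholders.
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	{
 *		void *vaddr = dma_buf_kmap(dmabuf, 0);
 *
 *		if (!IS_ERR_OR_NULL(vaddr)) {
 *			// read or fill the first page of the buffer
 *			dma_buf_kunmap(dmabuf, 0, vaddr);
 *		}
 *	}
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */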
Colin Crossf63958d2013-12-13 19:26:28 -08001394static struct dma_buf_ops dma_buf_ops = {
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001395 .map_dma_buf = ion_map_dma_buf,
1396 .unmap_dma_buf = ion_unmap_dma_buf,
1397 .mmap = ion_mmap,
1398 .release = ion_dma_buf_release,
Rebecca Schultz Zavin0f34faf2013-12-13 14:23:42 -08001399 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1400 .end_cpu_access = ion_dma_buf_end_cpu_access,
1401 .kmap_atomic = ion_dma_buf_kmap,
1402 .kunmap_atomic = ion_dma_buf_kunmap,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001403 .kmap = ion_dma_buf_kmap,
1404 .kunmap = ion_dma_buf_kunmap,
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001405};
1406
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001407static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
1408 struct ion_handle *handle,
1409 bool lock_client)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001410{
Dmitry Kalinkin5605b182015-07-13 15:50:30 +03001411 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001412 struct ion_buffer *buffer;
1413 struct dma_buf *dmabuf;
1414 bool valid_handle;
Sumit Semwald8fbe342015-01-23 12:53:43 +05301415
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001416 if (lock_client)
1417 mutex_lock(&client->lock);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001418 valid_handle = ion_handle_validate(client, handle);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001419 if (!valid_handle) {
Olav Haugana9bb0752013-12-13 14:23:54 -08001420 WARN(1, "%s: invalid handle passed to share.\n", __func__);
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001421 if (lock_client)
1422 mutex_unlock(&client->lock);
Johan Mossberg22ba4322013-12-13 14:24:34 -08001423 return ERR_PTR(-EINVAL);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001424 }
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001425 buffer = handle->buffer;
1426 ion_buffer_get(buffer);
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001427 if (lock_client)
1428 mutex_unlock(&client->lock);
Colin Cross83271f62013-12-13 14:24:59 -08001429
Sumit Semwal72449cb2015-02-21 09:00:17 +05301430 exp_info.ops = &dma_buf_ops;
1431 exp_info.size = buffer->size;
1432 exp_info.flags = O_RDWR;
1433 exp_info.priv = buffer;
1434
Sumit Semwald8fbe342015-01-23 12:53:43 +05301435 dmabuf = dma_buf_export(&exp_info);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001436 if (IS_ERR(dmabuf)) {
1437 ion_buffer_put(buffer);
Johan Mossberg22ba4322013-12-13 14:24:34 -08001438 return dmabuf;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001439 }
Johan Mossberg22ba4322013-12-13 14:24:34 -08001440
1441 return dmabuf;
1442}
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001443
1444struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1445 struct ion_handle *handle)
1446{
1447 return __ion_share_dma_buf(client, handle, true);
1448}
Johan Mossberg22ba4322013-12-13 14:24:34 -08001449EXPORT_SYMBOL(ion_share_dma_buf);
1450
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001451static int __ion_share_dma_buf_fd(struct ion_client *client,
1452 struct ion_handle *handle, bool lock_client)
Johan Mossberg22ba4322013-12-13 14:24:34 -08001453{
1454 struct dma_buf *dmabuf;
1455 int fd;
1456
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001457 dmabuf = __ion_share_dma_buf(client, handle, lock_client);
Johan Mossberg22ba4322013-12-13 14:24:34 -08001458 if (IS_ERR(dmabuf))
1459 return PTR_ERR(dmabuf);
1460
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001461 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
Laura Abbott55808b82013-12-13 14:23:57 -08001462 if (fd < 0)
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001463 dma_buf_put(dmabuf);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001464 return fd;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001465}
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001466
1467int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1468{
1469 return __ion_share_dma_buf_fd(client, handle, true);
1470}
Johan Mossberg22ba4322013-12-13 14:24:34 -08001471EXPORT_SYMBOL(ion_share_dma_buf_fd);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001472
Greg Hackmann3fedc0c2018-08-31 13:06:27 -07001473int ion_share_dma_buf_fd_nolock(struct ion_client *client,
1474 struct ion_handle *handle)
1475{
1476 return __ion_share_dma_buf_fd(client, handle, false);
1477}
1478
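/*
 * Illustrative sketch (not part of this file): a kernel client that owns an
 * ion handle can hand the buffer to userspace or to another driver through
 * the exports above.  "client" and "handle" are assumed to come from
 * ion_client_create()/ion_alloc() earlier in this file.
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 *	// fd holds its own reference to the buffer, so the original
 *	// handle can be freed independently of the fd's lifetime.
 */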
Minming Qi69376be2018-11-01 10:47:10 +08001479static struct ion_handle *__ion_import_dma_buf(struct ion_client *client,
1480 struct dma_buf *dmabuf, bool lock_client)
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001481{
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001482 struct ion_buffer *buffer;
1483 struct ion_handle *handle;
Colin Cross47b40452013-12-13 14:24:50 -08001484 int ret;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001485
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001486 /* if this memory came from ion */
1487
1488 if (dmabuf->ops != &dma_buf_ops) {
1489 pr_err("%s: can not import dmabuf from another exporter\n",
1490 __func__);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001491 return ERR_PTR(-EINVAL);
1492 }
1493 buffer = dmabuf->priv;
1494
Minming Qi69376be2018-11-01 10:47:10 +08001495 if (lock_client)
1496 mutex_lock(&client->lock);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001497 /* if a handle exists for this buffer just take a reference to it */
1498 handle = ion_handle_lookup(client, buffer);
Colin Cross9e907652013-12-13 14:24:49 -08001499 if (!IS_ERR(handle)) {
Daniel Rosenberg20746c12016-12-05 16:28:28 -08001500 handle = ion_handle_get_check_overflow(handle);
Minming Qi69376be2018-11-01 10:47:10 +08001501 if (lock_client)
1502 mutex_unlock(&client->lock);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001503 goto end;
1504 }
Colin Cross83271f62013-12-13 14:24:59 -08001505
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001506 handle = ion_handle_create(client, buffer);
Shawn Lin6fa92e22015-09-09 15:41:52 +08001507 if (IS_ERR(handle)) {
Minming Qi69376be2018-11-01 10:47:10 +08001508 if (lock_client)
1509 mutex_unlock(&client->lock);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001510 goto end;
Shawn Lin6fa92e22015-09-09 15:41:52 +08001511 }
Colin Cross83271f62013-12-13 14:24:59 -08001512
Colin Cross47b40452013-12-13 14:24:50 -08001513 ret = ion_handle_add(client, handle);
Minming Qi69376be2018-11-01 10:47:10 +08001514 if (lock_client)
1515 mutex_unlock(&client->lock);
Colin Cross47b40452013-12-13 14:24:50 -08001516 if (ret) {
Minming Qi69376be2018-11-01 10:47:10 +08001517 if (lock_client)
1518 ion_handle_put(handle);
1519 else
1520 ion_handle_put_nolock(handle);
Colin Cross47b40452013-12-13 14:24:50 -08001521 handle = ERR_PTR(ret);
1522 }
Colin Cross83271f62013-12-13 14:24:59 -08001523
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001524end:
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001525 return handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001526}
Minming Qi69376be2018-11-01 10:47:10 +08001527
1528struct ion_handle *ion_import_dma_buf(struct ion_client *client,
1529 struct dma_buf *dmabuf)
1530{
1531 return __ion_import_dma_buf(client, dmabuf, true);
1532}
Olav Hauganee4c8aa2013-12-13 14:23:55 -08001533EXPORT_SYMBOL(ion_import_dma_buf);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001534
Minming Qi69376be2018-11-01 10:47:10 +08001535static struct ion_handle *__ion_import_dma_buf_fd(struct ion_client *client,
1536 int fd, bool lock_client)
Rohit kumar9f903812016-01-12 09:31:46 +05301537{
1538 struct dma_buf *dmabuf;
1539 struct ion_handle *handle;
1540
1541 dmabuf = dma_buf_get(fd);
1542 if (IS_ERR(dmabuf))
1543 return ERR_CAST(dmabuf);
1544
Minming Qi69376be2018-11-01 10:47:10 +08001545 handle = __ion_import_dma_buf(client, dmabuf, lock_client);
Rohit kumar9f903812016-01-12 09:31:46 +05301546 dma_buf_put(dmabuf);
1547 return handle;
1548}
Minming Qi69376be2018-11-01 10:47:10 +08001549
1550struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
1551{
1552 return __ion_import_dma_buf_fd(client, fd, true);
1553}
Rohit kumar9f903812016-01-12 09:31:46 +05301554EXPORT_SYMBOL(ion_import_dma_buf_fd);
1555
Minming Qi69376be2018-11-01 10:47:10 +08001556struct ion_handle *ion_import_dma_buf_fd_nolock(struct ion_client *client, int fd)
1557{
1558 return __ion_import_dma_buf_fd(client, fd, false);
1559}
1560
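/*
 * Illustrative sketch of the inverse direction: a driver that received a
 * dma-buf fd (for example over an ioctl of its own) can turn it back into
 * an ion handle, provided the fd was exported by ion in the first place.
 * "client" and "fd" are placeholders.
 *
 *	struct ion_handle *handle = ion_import_dma_buf_fd(client, fd);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	// ... use the handle, then drop it with ion_free(client, handle) ...
 */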
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001561static int ion_sync_for_device(struct ion_client *client, int fd)
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001562{
1563 struct dma_buf *dmabuf;
1564 struct ion_buffer *buffer;
1565
1566 dmabuf = dma_buf_get(fd);
Colin Cross9e907652013-12-13 14:24:49 -08001567 if (IS_ERR(dmabuf))
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001568 return PTR_ERR(dmabuf);
1569
1570 /* if this memory came from ion */
1571 if (dmabuf->ops != &dma_buf_ops) {
1572 pr_err("%s: can not sync dmabuf from another exporter\n",
1573 __func__);
1574 dma_buf_put(dmabuf);
1575 return -EINVAL;
1576 }
1577 buffer = dmabuf->priv;
Rebecca Schultz Zavin856661d2013-12-13 14:24:05 -08001578
Liam Mark53261412017-12-04 10:58:55 -08001579 if (!is_buffer_hlos_assigned(buffer)) {
1580 pr_err("%s: cannot sync a secure dmabuf\n", __func__);
1581 dma_buf_put(dmabuf);
1582 return -EINVAL;
1583 }
Rebecca Schultz Zavin856661d2013-12-13 14:24:05 -08001584 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1585 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001586 dma_buf_put(dmabuf);
1587 return 0;
1588}
1589
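/*
 * Illustrative userspace sketch of the path above (assuming the legacy ion
 * uapi, where ION_IOC_SYNC takes a struct ion_fd_data carrying the dma-buf
 * fd).  This flushes the whole buffer for device access:
 *
 *	struct ion_fd_data data = { .fd = buf_fd };
 *
 *	if (ioctl(ion_fd, ION_IOC_SYNC, &data) < 0)
 *		return -errno;
 */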
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001590/* fix up the cases where the ioctl direction bits are incorrect */
1591static unsigned int ion_ioctl_dir(unsigned int cmd)
Laura Abbott02b23802016-09-07 11:49:59 -07001592{
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001593 switch (cmd) {
1594 case ION_IOC_SYNC:
1595 case ION_IOC_FREE:
1596 case ION_IOC_CUSTOM:
1597 return _IOC_WRITE;
1598 default:
1599 return _IOC_DIR(cmd);
1600 }
1601}
1602
1603static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1604{
1605 struct ion_client *client = filp->private_data;
Laura Abbott02b23802016-09-07 11:49:59 -07001606 struct ion_device *dev = client->dev;
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001607 struct ion_handle *cleanup_handle = NULL;
1608 int ret = 0;
1609 unsigned int dir;
Laura Abbott02b23802016-09-07 11:49:59 -07001610
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001611 union {
1612 struct ion_fd_data fd;
1613 struct ion_allocation_data allocation;
1614 struct ion_handle_data handle;
1615 struct ion_custom_data custom;
1616 } data;
Laura Abbott02b23802016-09-07 11:49:59 -07001617
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001618 dir = ion_ioctl_dir(cmd);
1619
1620 if (_IOC_SIZE(cmd) > sizeof(data))
1621 return -EINVAL;
1622
1623 if (dir & _IOC_WRITE)
1624 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1625 return -EFAULT;
1626
1627 switch (cmd) {
1628 case ION_IOC_ALLOC:
1629 {
1630 struct ion_handle *handle;
1631
Daniel Rosenbergc30d45a2016-11-02 17:43:51 -07001632 handle = __ion_alloc(client, data.allocation.len,
1633 data.allocation.align,
1634 data.allocation.heap_id_mask,
1635 data.allocation.flags, true);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001636 if (IS_ERR(handle))
1637 return PTR_ERR(handle);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001638 data.allocation.handle = handle->id;
1639
1640 cleanup_handle = handle;
Lee Jones11439502022-01-25 14:18:08 +00001641 pass_to_user(handle);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001642 break;
1643 }
1644 case ION_IOC_FREE:
1645 {
1646 struct ion_handle *handle;
1647
1648 mutex_lock(&client->lock);
1649 handle = ion_handle_get_by_id_nolock(client,
1650 data.handle.handle);
1651 if (IS_ERR(handle)) {
1652 mutex_unlock(&client->lock);
1653 return PTR_ERR(handle);
1654 }
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001655 user_ion_free_nolock(client, handle);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001656 ion_handle_put_nolock(handle);
1657 mutex_unlock(&client->lock);
1658 break;
1659 }
1660 case ION_IOC_SHARE:
1661 case ION_IOC_MAP:
1662 {
1663 struct ion_handle *handle;
1664
Swetha Chikkaboraiah6186c322020-08-05 16:51:54 +05301665 mutex_lock(&client->lock);
Minming Qi69376be2018-11-01 10:47:10 +08001666 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
Swetha Chikkaboraiah6186c322020-08-05 16:51:54 +05301667 if (IS_ERR(handle)) {
1668 mutex_unlock(&client->lock);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001669 return PTR_ERR(handle);
Swetha Chikkaboraiah6186c322020-08-05 16:51:54 +05301670 }
1671 data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle);
1672 ion_handle_put_nolock(handle);
1673 mutex_unlock(&client->lock);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001674 if (data.fd.fd < 0)
1675 ret = data.fd.fd;
1676 break;
1677 }
1678 case ION_IOC_IMPORT:
1679 {
1680 struct ion_handle *handle;
1681
1682 handle = ion_import_dma_buf_fd(client, data.fd.fd);
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001683 if (IS_ERR(handle)) {
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001684 ret = PTR_ERR(handle);
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001685 } else {
Lee Jones11439502022-01-25 14:18:08 +00001686 data.handle.handle = handle->id;
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001687 handle = pass_to_user(handle);
Lee Jones11439502022-01-25 14:18:08 +00001688 if (IS_ERR(handle)) {
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001689 ret = PTR_ERR(handle);
Lee Jones11439502022-01-25 14:18:08 +00001690 data.handle.handle = 0;
1691 }
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001692 }
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001693 break;
1694 }
1695 case ION_IOC_SYNC:
1696 {
1697 ret = ion_sync_for_device(client, data.fd.fd);
1698 break;
1699 }
1700 case ION_IOC_CUSTOM:
1701 {
1702 if (!dev->custom_ioctl)
1703 return -ENOTTY;
1704 ret = dev->custom_ioctl(client, data.custom.cmd,
1705 data.custom.arg);
1706 break;
1707 }
Patrick Dalyeeeb9402016-11-01 20:54:41 -07001708 case ION_IOC_CLEAN_CACHES:
1709 return client->dev->custom_ioctl(client,
1710 ION_IOC_CLEAN_CACHES, arg);
1711 case ION_IOC_INV_CACHES:
1712 return client->dev->custom_ioctl(client,
1713 ION_IOC_INV_CACHES, arg);
1714 case ION_IOC_CLEAN_INV_CACHES:
1715 return client->dev->custom_ioctl(client,
1716 ION_IOC_CLEAN_INV_CACHES, arg);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001717 default:
1718 return -ENOTTY;
Laura Abbott02b23802016-09-07 11:49:59 -07001719 }
1720
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001721 if (dir & _IOC_READ) {
1722 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
Daniel Rosenbergc30d45a2016-11-02 17:43:51 -07001723 if (cleanup_handle) {
Daniel Rosenberg8531a792017-02-03 20:37:06 -08001724 mutex_lock(&client->lock);
1725 user_ion_free_nolock(client, cleanup_handle);
1726 ion_handle_put_nolock(cleanup_handle);
1727 mutex_unlock(&client->lock);
Daniel Rosenbergc30d45a2016-11-02 17:43:51 -07001728 }
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001729 return -EFAULT;
1730 }
Laura Abbott02b23802016-09-07 11:49:59 -07001731 }
Daniel Rosenbergc30d45a2016-11-02 17:43:51 -07001732 if (cleanup_handle)
1733 ion_handle_put(cleanup_handle);
Laura Abbott02b23802016-09-07 11:49:59 -07001734 return ret;
1735}
1736
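/*
 * Illustrative userspace sketch of the ioctl ABI handled above (assuming
 * the legacy ion uapi structs; MY_HEAP_ID is a platform-specific
 * placeholder and error handling is omitted for brevity):
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len		= 4096,
 *		.align		= 4096,
 *		.heap_id_mask	= 1 << MY_HEAP_ID,	// placeholder
 *		.flags		= ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	// share.fd is a dma-buf fd
 *	free_data.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 *	// share.fd stays usable until close(share.fd)
 */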
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001737static int ion_release(struct inode *inode, struct file *file)
1738{
1739 struct ion_client *client = file->private_data;
1740
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001741 ion_client_destroy(client);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001742 return 0;
1743}
1744
1745static int ion_open(struct inode *inode, struct file *file)
1746{
1747 struct miscdevice *miscdev = file->private_data;
1748 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1749 struct ion_client *client;
Laura Abbott483ed032014-02-17 13:58:35 -08001750 char debug_name[64];
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001751
Laura Abbott483ed032014-02-17 13:58:35 -08001752 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1753 client = ion_client_create(dev, debug_name);
Colin Cross9e907652013-12-13 14:24:49 -08001754 if (IS_ERR(client))
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001755 return PTR_ERR(client);
1756 file->private_data = client;
1757
1758 return 0;
1759}
1760
1761static const struct file_operations ion_fops = {
1762 .owner = THIS_MODULE,
1763 .open = ion_open,
1764 .release = ion_release,
1765 .unlocked_ioctl = ion_ioctl,
Rom Lemarchand827c8492013-12-13 14:24:55 -08001766 .compat_ioctl = compat_ion_ioctl,
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001767};
1768
1769static size_t ion_debug_heap_total(struct ion_client *client,
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001770 unsigned int id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001771{
1772 size_t size = 0;
1773 struct rb_node *n;
1774
1775 mutex_lock(&client->lock);
1776 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1777 struct ion_handle *handle = rb_entry(n,
1778 struct ion_handle,
1779 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001780 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001781 size += handle->buffer->size;
1782 }
1783 mutex_unlock(&client->lock);
1784 return size;
1785}
1786
Patrick Dalyeeeb9402016-11-01 20:54:41 -07001787/**
1788 * ion_debug_mem_map_create - create a mem_map of the heap
1789 * @s:       seq_file to log error messages to
1790 * @heap:    the heap to create the mem_map for
1791 * @mem_map: the mem_map to be created

1792 */
1793void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1794 struct list_head *mem_map)
1795{
1796 struct ion_device *dev = heap->dev;
1797 struct rb_node *cnode;
1798 size_t size;
1799 struct ion_client *client;
1800
1801 if (!heap->ops->phys)
1802 return;
1803
1804 down_read(&dev->lock);
1805 for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
1806 struct rb_node *hnode;
1807
1808 client = rb_entry(cnode, struct ion_client, node);
1809
1810 mutex_lock(&client->lock);
1811 for (hnode = rb_first(&client->handles);
1812 hnode;
1813 hnode = rb_next(hnode)) {
1814 struct ion_handle *handle = rb_entry(
1815 hnode, struct ion_handle, node);
1816 if (handle->buffer->heap == heap) {
1817 struct mem_map_data *data =
1818 kzalloc(sizeof(*data), GFP_KERNEL);
1819 if (!data)
1820 goto inner_error;
1821 heap->ops->phys(heap, handle->buffer,
1822 &data->addr, &size);
1823 data->size = (unsigned long)size;
1824 data->addr_end = data->addr + data->size - 1;
1825 data->client_name = kstrdup(client->name,
1826 GFP_KERNEL);
1827 if (!data->client_name) {
1828 kfree(data);
1829 goto inner_error;
1830 }
1831 list_add(&data->node, mem_map);
1832 }
1833 }
1834 mutex_unlock(&client->lock);
1835 }
1836 up_read(&dev->lock);
1837 return;
1838
1839inner_error:
1840 seq_puts(s,
1841 "ERROR: out of memory. Part of memory map will not be logged\n");
1842 mutex_unlock(&client->lock);
1843 up_read(&dev->lock);
1844}
1845
1846/**
1847 * ion_debug_mem_map_destroy - free a mem_map built by ion_debug_mem_map_create()
1848 * @mem_map: the mem_map to free
1849 */
1850static void ion_debug_mem_map_destroy(struct list_head *mem_map)
1851{
1852 if (mem_map) {
1853 struct mem_map_data *data, *tmp;
1854
1855 list_for_each_entry_safe(data, tmp, mem_map, node) {
1856 list_del(&data->node);
1857 kfree(data->client_name);
1858 kfree(data);
1859 }
1860 }
1861}
1862
1863static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
1864{
1865 struct mem_map_data *d1, *d2;
1866
1867 d1 = list_entry(a, struct mem_map_data, node);
1868 d2 = list_entry(b, struct mem_map_data, node);
1869 if (d1->addr == d2->addr)
1870 return d1->size - d2->size;
1871 return d1->addr - d2->addr;
1872}
1873
1874/**
1875 * ion_heap_print_debug - print heap debug information
1876 * @s:    seq_file to log messages to
1877 * @heap: the heap to print debug information for
1878 */
1879static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1880{
1881 if (heap->ops->print_debug) {
1882 struct list_head mem_map = LIST_HEAD_INIT(mem_map);
1883
1884 ion_debug_mem_map_create(s, heap, &mem_map);
1885 list_sort(NULL, &mem_map, mem_map_cmp);
1886 heap->ops->print_debug(heap, s, &mem_map);
1887 ion_debug_mem_map_destroy(&mem_map);
1888 }
1889}
1890
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001891static int ion_debug_heap_show(struct seq_file *s, void *unused)
1892{
1893 struct ion_heap *heap = s->private;
1894 struct ion_device *dev = heap->dev;
1895 struct rb_node *n;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001896 size_t total_size = 0;
1897 size_t total_orphaned_size = 0;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001898
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001899 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Iulia Manda164ad862014-03-11 20:12:29 +02001900 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001901
Patrick Daly60f0d9a2017-06-30 17:16:21 -07001902 down_read(&dev->lock);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001903 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001904 struct ion_client *client = rb_entry(n, struct ion_client,
1905 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001906 size_t size = ion_debug_heap_total(client, heap->id);
Seunghun Lee10f62862014-05-01 01:30:23 +09001907
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001908 if (!size)
1909 continue;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001910 if (client->task) {
1911 char task_comm[TASK_COMM_LEN];
1912
1913 get_task_comm(task_comm, client->task);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001914 seq_printf(s, "%16s %16u %16zu\n", task_comm,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001915 client->pid, size);
1916 } else {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001917 seq_printf(s, "%16s %16u %16zu\n", client->name,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001918 client->pid, size);
1919 }
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001920 }
Patrick Daly60f0d9a2017-06-30 17:16:21 -07001921 up_read(&dev->lock);
Neil Zhang948c4db2016-01-26 17:39:06 +08001922
Iulia Manda164ad862014-03-11 20:12:29 +02001923 seq_puts(s, "----------------------------------------------------\n");
1924 seq_puts(s, "orphaned allocations (info is from last known client):\n");
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001925 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001926 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1927 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1928 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001929 if (buffer->heap->id != heap->id)
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001930 continue;
1931 total_size += buffer->size;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001932 if (!buffer->handle_count) {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001933 seq_printf(s, "%16s %16u %16zu %d %d\n",
Colin Crosse61fc912013-12-13 19:26:14 -08001934 buffer->task_comm, buffer->pid,
1935 buffer->size, buffer->kmap_cnt,
Benjamin Gaignard092c3542013-12-13 14:24:22 -08001936 atomic_read(&buffer->ref.refcount));
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001937 total_orphaned_size += buffer->size;
1938 }
1939 }
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001940 mutex_unlock(&dev->buffer_lock);
Iulia Manda164ad862014-03-11 20:12:29 +02001941 seq_puts(s, "----------------------------------------------------\n");
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001942 seq_printf(s, "%16s %16zu\n", "total orphaned",
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001943 total_orphaned_size);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001944 seq_printf(s, "%16s %16zu\n", "total ", total_size);
Colin Cross2540c732013-12-13 14:24:47 -08001945 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001946 seq_printf(s, "%16s %16zu\n", "deferred free",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07001947 heap->free_list_size);
Iulia Manda164ad862014-03-11 20:12:29 +02001948 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001949
1950 if (heap->debug_show)
1951 heap->debug_show(heap, s, unused);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001952
Patrick Dalyeeeb9402016-11-01 20:54:41 -07001953 ion_heap_print_debug(s, heap);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001954 return 0;
1955}
1956
1957static int ion_debug_heap_open(struct inode *inode, struct file *file)
1958{
1959 return single_open(file, ion_debug_heap_show, inode->i_private);
1960}
1961
1962static const struct file_operations debug_heap_fops = {
1963 .open = ion_debug_heap_open,
1964 .read = seq_read,
1965 .llseek = seq_lseek,
1966 .release = single_release,
1967};
1968
Laura Abbott29defcc2014-08-01 16:13:40 -07001969void show_ion_usage(struct ion_device *dev)
1970{
1971 struct ion_heap *heap;
1972
1973 if (!down_read_trylock(&dev->lock)) {
1974 pr_err("Ion output would deadlock, can't print debug information\n");
1975 return;
1976 }
1977
1978 pr_info("%16.s %16.s %16.s\n", "Heap name", "Total heap size",
1979 "Total orphaned size");
1980 pr_info("---------------------------------\n");
1981 plist_for_each_entry(heap, &dev->heaps, node) {
Patrick Dalye4640062017-08-01 19:56:52 -07001982		pr_info("%16s 0x%16lx 0x%16lx\n",
1983 heap->name, atomic_long_read(&heap->total_allocated),
1984 atomic_long_read(&heap->total_allocated) -
1985 atomic_long_read(&heap->total_handles));
Laura Abbott29defcc2014-08-01 16:13:40 -07001986 if (heap->debug_show)
1987 heap->debug_show(heap, NULL, 0);
1988 }
1989 up_read(&dev->lock);
1990}
1991
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001992static int debug_shrink_set(void *data, u64 val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001993{
John Stultze1d855b2013-12-13 19:26:33 -08001994 struct ion_heap *heap = data;
1995 struct shrink_control sc;
1996 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001997
Derek Yerger3b0ae7b2016-03-11 17:31:18 -05001998 sc.gfp_mask = GFP_HIGHUSER;
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001999 sc.nr_to_scan = val;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08002000
Gioh Kimaeb7fa72015-07-06 15:14:41 +09002001 if (!val) {
2002 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
2003 sc.nr_to_scan = objs;
2004 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08002005
Gioh Kimaeb7fa72015-07-06 15:14:41 +09002006 heap->shrinker.scan_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08002007 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08002008}
2009
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08002010static int debug_shrink_get(void *data, u64 *val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08002011{
John Stultze1d855b2013-12-13 19:26:33 -08002012 struct ion_heap *heap = data;
2013 struct shrink_control sc;
2014 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08002015
Derek Yerger3b0ae7b2016-03-11 17:31:18 -05002016 sc.gfp_mask = GFP_HIGHUSER;
John Stultze1d855b2013-12-13 19:26:33 -08002017 sc.nr_to_scan = 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08002018
Gioh Kimaeb7fa72015-07-06 15:14:41 +09002019 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08002020 *val = objs;
2021 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08002022}
2023
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08002024DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
John Stultze1d855b2013-12-13 19:26:33 -08002025 debug_shrink_set, "%llu\n");
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08002026
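/*
 * Illustrative sketch: the attribute above shows up in debugfs as
 * <debugfs>/ion/heaps/<heap name>_shrink (created in ion_device_add_heap()
 * below).  Reading it reports how many objects the heap's shrinker could
 * free; writing 0 drains everything, writing N asks the shrinker to scan N
 * objects.  Paths assume debugfs mounted at /sys/kernel/debug and a heap
 * named "system":
 *
 *	cat /sys/kernel/debug/ion/heaps/system_shrink
 *	echo 0 > /sys/kernel/debug/ion/heaps/system_shrink
 */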
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002027void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
2028{
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002029 struct dentry *debug_file;
2030
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002031 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
2032 !heap->ops->unmap_dma)
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -08002033 pr_err("%s: can not add heap with invalid ops struct.\n",
2034 __func__);
2035
Mitchel Humpherys95e53dd2015-01-08 17:24:27 -08002036 spin_lock_init(&heap->free_lock);
2037 heap->free_list_size = 0;
2038
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08002039 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
2040 ion_heap_init_deferred_free(heap);
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08002041
Colin Crossb9daf0b2014-02-17 13:58:38 -08002042 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
2043 ion_heap_init_shrinker(heap);
2044
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002045 heap->dev = dev;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08002046 down_write(&dev->lock);
Sriram Raghunathan7e416172015-09-22 22:35:51 +05302047 /*
2048 * use negative heap->id to reverse the priority -- when traversing
2049 * the list later attempt higher id numbers first
2050 */
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08002051 plist_node_init(&heap->node, -heap->id);
2052 plist_add(&heap->node, &dev->heaps);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002053 debug_file = debugfs_create_file(heap->name, 0664,
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002054 dev->heaps_debug_root, heap,
2055 &debug_heap_fops);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002056
2057 if (!debug_file) {
2058 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09002059
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002060 path = dentry_path(dev->heaps_debug_root, buf, 256);
2061 pr_err("Failed to create heap debugfs at %s/%s\n",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002062 path, heap->name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002063 }
2064
Gioh Kimaeb7fa72015-07-06 15:14:41 +09002065 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08002066 char debug_name[64];
2067
2068 snprintf(debug_name, 64, "%s_shrink", heap->name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002069 debug_file = debugfs_create_file(
2070 debug_name, 0644, dev->heaps_debug_root, heap,
2071 &debug_shrink_fops);
2072 if (!debug_file) {
2073 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09002074
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002075 path = dentry_path(dev->heaps_debug_root, buf, 256);
2076 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002077 path, debug_name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002078 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08002079 }
Gioh Kimaeb7fa72015-07-06 15:14:41 +09002080
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08002081 up_write(&dev->lock);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002082}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04002083EXPORT_SYMBOL(ion_device_add_heap);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002084
Laura Abbott29defcc2014-08-01 16:13:40 -07002085int ion_walk_heaps(struct ion_client *client, int heap_id,
2086 enum ion_heap_type type, void *data,
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002087 int (*f)(struct ion_heap *heap, void *data))
2088{
Laura Abbott29defcc2014-08-01 16:13:40 -07002089 int ret_val = 0;
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002090 struct ion_heap *heap;
2091 struct ion_device *dev = client->dev;
2092 /*
2093 * traverse the list of heaps available in this system
2094 * and find the heap that is specified.
2095 */
2096 down_write(&dev->lock);
2097 plist_for_each_entry(heap, &dev->heaps, node) {
Laura Abbott29defcc2014-08-01 16:13:40 -07002098 if (ION_HEAP(heap->id) != heap_id ||
2099 type != heap->type)
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002100 continue;
2101 ret_val = f(heap, data);
2102 break;
2103 }
2104 up_write(&dev->lock);
2105 return ret_val;
2106}
2107EXPORT_SYMBOL(ion_walk_heaps);
2108
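/*
 * Illustrative sketch of an ion_walk_heaps() caller.  Note the walk matches
 * on ION_HEAP(heap->id), so the heap_id argument is the heap's bit mask,
 * not its raw id; MY_HEAP_ID and the callback body are placeholders.
 *
 *	static int heap_print_cb(struct ion_heap *heap, void *data)
 *	{
 *		pr_info("found heap %s\n", heap->name);
 *		return 0;
 *	}
 *
 *	ion_walk_heaps(client, ION_HEAP(MY_HEAP_ID), ION_HEAP_TYPE_CARVEOUT,
 *		       NULL, heap_print_cb);
 */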
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002109struct ion_device *ion_device_create(long (*custom_ioctl)
2110 (struct ion_client *client,
2111 unsigned int cmd,
2112 unsigned long arg))
2113{
2114 struct ion_device *idev;
2115 int ret;
2116
Ben Marsh411059f2016-03-28 19:26:19 +02002117 idev = kzalloc(sizeof(*idev), GFP_KERNEL);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002118 if (!idev)
2119 return ERR_PTR(-ENOMEM);
2120
2121 idev->dev.minor = MISC_DYNAMIC_MINOR;
2122 idev->dev.name = "ion";
2123 idev->dev.fops = &ion_fops;
2124 idev->dev.parent = NULL;
2125 ret = misc_register(&idev->dev);
2126 if (ret) {
2127 pr_err("ion: failed to register misc device.\n");
Shailendra Verma283d9302015-05-19 20:29:00 +05302128 kfree(idev);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002129 return ERR_PTR(ret);
2130 }
2131
2132 idev->debug_root = debugfs_create_dir("ion", NULL);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002133 if (!idev->debug_root) {
2134 pr_err("ion: failed to create debugfs root directory.\n");
2135 goto debugfs_done;
2136 }
2137 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
2138 if (!idev->heaps_debug_root) {
2139 pr_err("ion: failed to create debugfs heaps directory.\n");
2140 goto debugfs_done;
2141 }
2142 idev->clients_debug_root = debugfs_create_dir("clients",
2143 idev->debug_root);
2144 if (!idev->clients_debug_root)
2145 pr_err("ion: failed to create debugfs clients directory.\n");
2146
2147debugfs_done:
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002148
2149 idev->custom_ioctl = custom_ioctl;
2150 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08002151 mutex_init(&idev->buffer_lock);
2152 init_rwsem(&idev->lock);
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08002153 plist_head_init(&idev->heaps);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08002154 idev->clients = RB_ROOT;
Neil Zhang948c4db2016-01-26 17:39:06 +08002155 ion_root_client = &idev->clients;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002156 return idev;
2157}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04002158EXPORT_SYMBOL(ion_device_create);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002159
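/*
 * Illustrative sketch of how a platform driver typically brings ion up with
 * ion_device_create() and ion_device_add_heap().  ion_heap_create() is the
 * generic constructor from ion_priv.h; "pdata" and "my_custom_ioctl" are
 * placeholders supplied by the platform code.
 *
 *	struct ion_device *idev = ion_device_create(my_custom_ioctl);
 *	int i;
 *
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		if (IS_ERR_OR_NULL(heap))
 *			continue;
 *		ion_device_add_heap(idev, heap);
 *	}
 */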
2160void ion_device_destroy(struct ion_device *dev)
2161{
2162 misc_deregister(&dev->dev);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08002163 debugfs_remove_recursive(dev->debug_root);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08002164 /* XXX need to free the heaps and clients ? */
2165 kfree(dev);
2166}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04002167EXPORT_SYMBOL(ion_device_destroy);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002168
2169void __init ion_reserve(struct ion_platform_data *data)
2170{
2171 int i;
2172
2173 for (i = 0; i < data->nr; i++) {
2174 if (data->heaps[i].size == 0)
2175 continue;
2176
2177 if (data->heaps[i].base == 0) {
2178 phys_addr_t paddr;
2179
2180 paddr = memblock_alloc_base(data->heaps[i].size,
2181 data->heaps[i].align,
2182 MEMBLOCK_ALLOC_ANYWHERE);
2183 if (!paddr) {
2184 pr_err("%s: error allocating memblock for heap %d\n",
2185 __func__, i);
2186 continue;
2187 }
2188 data->heaps[i].base = paddr;
2189 } else {
2190 int ret = memblock_reserve(data->heaps[i].base,
2191 data->heaps[i].size);
2192 if (ret)
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002193 pr_err("memblock reserve of %zx@%pa failed\n",
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002194 data->heaps[i].size,
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002195 &data->heaps[i].base);
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002196 }
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002197 pr_info("%s: %s reserved base %pa size %zu\n", __func__,
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002198 data->heaps[i].name,
Patrick Dalyeeeb9402016-11-01 20:54:41 -07002199 &data->heaps[i].base,
Patrick Daly7e8cbb42016-11-01 18:37:42 -07002200 data->heaps[i].size);
2201 }
2202}
Minming Qi69376be2018-11-01 10:47:10 +08002203
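/*
 * Illustrative sketch of the early-boot side of ion_reserve(): a board file
 * describes its carveout heaps and reserves their memory before the page
 * allocator claims it.  The id, size and name below are placeholders.
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.id	= MY_HEAP_ID,
 *			.type	= ION_HEAP_TYPE_CARVEOUT,
 *			.name	= "example-carveout",
 *			.base	= 0,		// let memblock pick the base
 *			.size	= SZ_16M,
 *			.align	= SZ_1M,
 *		},
 *	};
 *	static struct ion_platform_data example_data = {
 *		.nr	= ARRAY_SIZE(example_heaps),
 *		.heaps	= example_heaps,
 *	};
 *
 *	ion_reserve(&example_data);	// from the machine's reserve() hook
 */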
2204void lock_client(struct ion_client *client)
2205{
2206 mutex_lock(&client->lock);
2207}
2208
2209void unlock_client(struct ion_client *client)
2210{
2211 mutex_unlock(&client->lock);
2212}
2213
2214struct ion_buffer *get_buffer(struct ion_handle *handle)
2215{
2216 return handle->buffer;
2217}