/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	hook for device-specific ioctls
 * @clients:		an rb tree of all the clients attached to this device
 * @debug_root:		root dentry of this device's debugfs hierarchy
 * @heaps_debug_root:	debugfs directory holding the per-heap debug files
 * @clients_debug_root:	debugfs directory holding the per-client debug files
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 * @pid:		pid of the task that created the client, for debugging
 * @debug_root:		this client's debugfs entry
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying
 * either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to @node, @kmap_cnt or the mapping should be protected by
 * the lock in the client.  Other fields are never changed after
 * initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

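/*
 * The low bit of each buffer->pages entry doubles as a per-page dirty flag:
 * struct page pointers are at least word-aligned, so bit 0 is always free.
 * The helpers below set, test, and clear that bit; ion_buffer_page()
 * recovers the untagged pointer.
 */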
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

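	/*
	 * If the first attempt fails and the heap frees buffers
	 * asynchronously, drain its freelist to reclaim memory and retry
	 * once before giving up.
	 */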
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

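	/*
	 * Heaps that free asynchronously get the buffer queued on their
	 * freelist; everyone else tears it down immediately.
	 */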
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

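/* Callers of the _nolock variants below must hold client->lock. */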
static int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						      int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.\n", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until
	 * allocate has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
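
/*
 * A minimal sketch of typical in-kernel usage of the allocation API,
 * assuming a client already exists and heap id 0 is populated (both are
 * illustrative assumptions, not guarantees of this file):
 *
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	handle = ion_alloc(client, SZ_4K, 0, 1 << 0, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 */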

static void ion_free_nolock(struct ion_client *client,
			    struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	return buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

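/*
 * The client debugfs show hook can run concurrently with client teardown,
 * so it must not trust its private pointer blindly.  debugfs_mutex
 * serializes the two paths, and is_client_alive() re-checks that the
 * client is still linked into the device's rb tree before dereferencing it.
 */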
static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

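/*
 * Return the next free serial for @name: one greater than the highest
 * display_serial already in use by a client of that name, or 0 if the
 * name is unused.  Called by ion_client_create() with dev->lock held.
 */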
static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

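/*
 * One ion_vma_list entry is kept per userspace VMA that currently maps a
 * faulted buffer, so ion_buffer_sync_for_device() can find and zap every
 * live mapping.
 */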
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
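	/*
	 * Tear down the PTEs of every live user mapping so that the next
	 * CPU access refaults and marks its page dirty again.
	 */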
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

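/*
 * Fault handler for buffers with faulted user mappings: mark the page
 * dirty (so the next device sync flushes it) and install the backing pfn.
 */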
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);

	return 0;
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

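/*
 * A sketch of the cross-process sharing flow built from the exports below,
 * assuming each side holds its own ion_client (names are illustrative):
 *
 *	fd = ion_share_dma_buf_fd(client_a, handle);	(exporting side)
 *	... pass fd to the other process or driver ...
 *	handle_b = ion_import_dma_buf_fd(client_b, fd);	(importing side)
 */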
Johan Mossberg22ba4322013-12-13 14:24:34 -08001169struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1170 struct ion_handle *handle)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001171{
Dmitry Kalinkin5605b182015-07-13 15:50:30 +03001172 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001173 struct ion_buffer *buffer;
1174 struct dma_buf *dmabuf;
1175 bool valid_handle;
Sumit Semwald8fbe342015-01-23 12:53:43 +05301176
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001177 mutex_lock(&client->lock);
1178 valid_handle = ion_handle_validate(client, handle);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001179 if (!valid_handle) {
Olav Haugana9bb0752013-12-13 14:23:54 -08001180 WARN(1, "%s: invalid handle passed to share.\n", __func__);
Colin Cross83271f62013-12-13 14:24:59 -08001181 mutex_unlock(&client->lock);
Johan Mossberg22ba4322013-12-13 14:24:34 -08001182 return ERR_PTR(-EINVAL);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001183 }
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001184 buffer = handle->buffer;
1185 ion_buffer_get(buffer);
Colin Cross83271f62013-12-13 14:24:59 -08001186 mutex_unlock(&client->lock);
1187
Sumit Semwal72449cb2015-02-21 09:00:17 +05301188 exp_info.ops = &dma_buf_ops;
1189 exp_info.size = buffer->size;
1190 exp_info.flags = O_RDWR;
1191 exp_info.priv = buffer;
1192
Sumit Semwald8fbe342015-01-23 12:53:43 +05301193 dmabuf = dma_buf_export(&exp_info);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001194 if (IS_ERR(dmabuf)) {
1195 ion_buffer_put(buffer);
Johan Mossberg22ba4322013-12-13 14:24:34 -08001196 return dmabuf;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001197 }
Johan Mossberg22ba4322013-12-13 14:24:34 -08001198
1199 return dmabuf;
1200}
1201EXPORT_SYMBOL(ion_share_dma_buf);
1202
1203int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1204{
1205 struct dma_buf *dmabuf;
1206 int fd;
1207
1208 dmabuf = ion_share_dma_buf(client, handle);
1209 if (IS_ERR(dmabuf))
1210 return PTR_ERR(dmabuf);
1211
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001212 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
Laura Abbott55808b82013-12-13 14:23:57 -08001213 if (fd < 0)
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001214 dma_buf_put(dmabuf);
Laura Abbott55808b82013-12-13 14:23:57 -08001215
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001216 return fd;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001217}
Johan Mossberg22ba4322013-12-13 14:24:34 -08001218EXPORT_SYMBOL(ion_share_dma_buf_fd);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001219
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

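/**
 * ion_import_dma_buf_fd() - import a dma-buf fd previously exported by ion
 * @client:	the client to attach the handle to
 * @fd:		the dma-buf file descriptor
 *
 * Thin wrapper around ion_import_dma_buf() that resolves @fd and drops
 * the temporary dma_buf reference once the handle has been created.
 */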
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);

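/*
 * Flush the CPU caches for an ion buffer so the device sees up-to-date
 * data.  Note that a NULL device is passed to dma_sync_sg_for_device();
 * this relies on the arch dma ops tolerating a NULL device and is one
 * of the known rough edges of this staging driver.
 */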
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

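/*
 * Single entry point for the ion character device.  The union below is
 * sized to the largest argument struct; the direction bits (fixed up by
 * ion_ioctl_dir()) decide whether it is copied in from and/or back out
 * to userspace, so each case only has to fill in its own member.
 */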
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf_fd(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

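/*
 * Each open of /dev/ion creates a fresh client named after the opening
 * process's tgid, which groups that file's allocations for debugfs.
 */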
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

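/*
 * Sum the sizes of all of @client's handles that live on the heap with
 * the given id; used by the per-heap debugfs files below.
 */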
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

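/*
 * Per-heap debugfs seq_file: one line per client with a live allocation
 * on this heap, followed by orphaned buffers -- buffers whose handles
 * are all gone but which are still pinned, e.g. by a shared dma-buf fd.
 */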
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

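/*
 * Writing N to a heap's <name>_shrink debugfs file asks the heap
 * shrinker to scan N objects; writing 0 is special-cased to drain
 * everything the shrinker currently counts.
 */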
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

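/* Reading the file reports how many objects the shrinker could free. */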
static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

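/**
 * ion_device_add_heap() - register a heap with an ion device
 * @dev:	the device to add the heap to
 * @heap:	the heap, with its ops and flags already filled in
 *
 * Sets up deferred freeing and the shrinker when the heap asks for
 * them, inserts the heap into the device's priority-sorted plist, and
 * creates the matching debugfs files.  Failure to create debugfs
 * entries is logged but not fatal.
 */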
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

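/**
 * ion_device_create() - create and register an ion misc device
 * @custom_ioctl:	optional hook backing ION_IOC_CUSTOM; may be NULL,
 *			in which case ION_IOC_CUSTOM returns -ENOTTY
 *
 * A minimal sketch of how a platform driver might bring ion up with one
 * heap (my_heap is a hypothetical, already-initialized struct ion_heap,
 * not something defined in this file):
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	ion_device_add_heap(idev, my_heap);
 */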
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);

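/*
 * Carve out the platform-declared heaps at early boot: heaps with a
 * fixed base are reserved in place, heaps without one get memory from
 * memblock and record the resulting base.  This has to run early, while
 * memblock still owns the memory -- hence __init and the memblock API.
 */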
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}