/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @buffer_lock: lock protecting the tree of buffers
 * @lock: rwsem protecting the tree of heaps and clients
 * @heaps: list of all the heaps in the system
 * @custom_ioctl: arch specific ioctl function, if applicable
 * @clients: an rb tree of all the existing clients
 * @debug_root: debugfs root directory
 * @heaps_debug_root: debugfs root directory for the heaps
 * @clients_debug_root: debugfs root directory for the clients
 */
struct ion_device {
        struct miscdevice dev;
        struct rb_root buffers;
        struct mutex buffer_lock;
        struct rw_semaphore lock;
        struct plist_head heaps;
        long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
                             unsigned long arg);
        struct rb_root clients;
        struct dentry *debug_root;
        struct dentry *heaps_debug_root;
        struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node: node in the tree of all clients
 * @dev: backpointer to ion device
 * @handles: an rb tree of all the handles in this client
 * @idr: an idr space for allocating handle ids
 * @lock: lock protecting the tree of handles
 * @name: used for debugging
 * @display_name: used for debugging (unique version of @name)
 * @display_serial: used for debugging (to make display_name unique)
 * @task: used for debugging
 * @pid: used for debugging
 * @debug_root: debugfs root directory
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
        struct rb_node node;
        struct ion_device *dev;
        struct rb_root handles;
        struct idr idr;
        struct mutex lock;
        const char *name;
        char *display_name;
        int display_serial;
        struct task_struct *task;
        pid_t pid;
        struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref: reference count
 * @client: back pointer to the client the buffer resides in
 * @buffer: pointer to the buffer
 * @node: node in the client's handle rbtree
 * @kmap_cnt: count of times this client has mapped to kernel
 * @id: client-unique id allocated by client->idr
 *
 * Modifications to node, kmap_cnt or buffer should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
        struct kref ref;
        struct ion_client *client;
        struct ion_buffer *buffer;
        struct rb_node node;
        unsigned int kmap_cnt;
        int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
        return (buffer->flags & ION_FLAG_CACHED) &&
                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
        return !!(buffer->flags & ION_FLAG_CACHED);
}

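/*
 * Note (editorial, based on the two checks above and the uapi flag
 * descriptions): ION_FLAG_CACHED alone means user mappings are built
 * lazily via faults so ion can track dirty pages and sync them for dma;
 * ION_FLAG_CACHED plus ION_FLAG_CACHED_NEEDS_SYNC means the caller does
 * its own cache maintenance, the mapping is created at mmap time, and
 * ion_buffer_fault_user_mappings() returns false.
 */
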
static inline struct page *ion_buffer_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
        return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
        *page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
        *page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

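/*
 * Illustrative sketch (not driver code) of the tagged-pointer scheme the
 * four helpers above implement: bit 0 of each buffer->pages[] entry
 * doubles as a per-page dirty flag, so an entry must be untagged with
 * ion_buffer_page() before being used as a real struct page pointer:
 *
 *      ion_buffer_page_dirty(buffer->pages + i);        sets bit 0
 *      ion_buffer_page_is_dirty(buffer->pages[i]);      now true
 *      page = ion_buffer_page(buffer->pages[i]);        strips the tag
 *      ion_buffer_page_clean(buffer->pages + i);        clears bit 0
 */
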
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.\n", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long align,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        kref_init(&buffer->ref);

        ret = heap->ops->allocate(heap, buffer, len, align, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, align,
                                          flags);
                if (ret)
                        goto err2;
        }

        if (buffer->sg_table == NULL) {
                WARN_ONCE(1, "This heap needs to set the sgtable");
                ret = -EINVAL;
                goto err1;
        }

        table = buffer->sg_table;
        buffer->dev = dev;
        buffer->size = len;

        if (ion_buffer_fault_user_mappings(buffer)) {
                int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
                struct scatterlist *sg;
                int i, j, k = 0;

                buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
                if (!buffer->pages) {
                        ret = -ENOMEM;
                        goto err1;
                }

                for_each_sg(table->sgl, sg, table->nents, i) {
                        struct page *page = sg_page(sg);

                        for (j = 0; j < sg->length / PAGE_SIZE; j++)
                                buffer->pages[k++] = page++;
                }
        }

        INIT_LIST_HEAD(&buffer->vmas);
        mutex_init(&buffer->lock);
        /*
         * this will set up dma addresses for the sglist -- it is not
         * technically correct as per the dma api -- a specific
         * device isn't really taking ownership here. However, in practice on
         * our systems the only dma_address space is physical addresses.
         * Additionally, we can't afford the overhead of invalidating every
         * allocation via dma_map_sg. The implicit contract here is that
         * memory coming from the heaps is ready for dma, i.e. if it has a
         * cached mapping that mapping has been invalidated
         */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                sg_dma_address(sg) = sg_phys(sg);
                sg_dma_len(sg) = sg->length;
        }
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;

err1:
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
        vfree(buffer->pages);
        kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
        struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
        struct ion_heap *heap = buffer->heap;
        struct ion_device *dev = buffer->dev;

        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_freelist_add(heap, buffer);
        else
                ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
        kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
        return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
        mutex_lock(&buffer->lock);
        buffer->handle_count++;
        mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
        /*
         * when a buffer is removed from a handle, if it is not in
         * any other handles, copy the taskcomm and the pid of the
         * process it's being removed from into the buffer. At this
         * point there will be no way to track what processes this buffer is
         * being used by, it only exists as a dma_buf file descriptor.
         * The taskcomm and pid can provide a debug hint as to where this fd
         * is in the system
         */
        mutex_lock(&buffer->lock);
        buffer->handle_count--;
        BUG_ON(buffer->handle_count < 0);
        if (!buffer->handle_count) {
                struct task_struct *task;

                task = current->group_leader;
                get_task_comm(buffer->task_comm, task);
                buffer->pid = task_pid_nr(task);
        }
        mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct ion_handle *handle;

        handle = kzalloc(sizeof(*handle), GFP_KERNEL);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        kref_init(&handle->ref);
        RB_CLEAR_NODE(&handle->node);
        handle->client = client;
        ion_buffer_get(buffer);
        ion_buffer_add_to_handle(buffer);
        handle->buffer = buffer;

        return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
        struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
        struct ion_client *client = handle->client;
        struct ion_buffer *buffer = handle->buffer;

        mutex_lock(&buffer->lock);
        while (handle->kmap_cnt)
                ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);

        idr_remove(&client->idr, handle->id);
        if (!RB_EMPTY_NODE(&handle->node))
                rb_erase(&handle->node, &client->handles);

        ion_buffer_remove_from_handle(buffer);
        ion_buffer_put(buffer);

        kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
        return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
        kref_get(&handle->ref);
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
        return kref_put(&handle->ref, ion_handle_destroy);
}

static int ion_handle_put(struct ion_handle *handle)
{
        struct ion_client *client = handle->client;
        int ret;

        mutex_lock(&client->lock);
        ret = ion_handle_put_nolock(handle);
        mutex_unlock(&client->lock);

        return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct rb_node *n = client->handles.rb_node;

        while (n) {
                struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

                if (buffer < entry->buffer)
                        n = n->rb_left;
                else if (buffer > entry->buffer)
                        n = n->rb_right;
                else
                        return entry;
        }
        return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
                                                      int id)
{
        struct ion_handle *handle;

        handle = idr_find(&client->idr, id);
        if (handle)
                ion_handle_get(handle);

        return handle ? handle : ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
                                               int id)
{
        struct ion_handle *handle;

        mutex_lock(&client->lock);
        handle = ion_handle_get_by_id_nolock(client, id);
        mutex_unlock(&client->lock);

        return handle;
}

static bool ion_handle_validate(struct ion_client *client,
                                struct ion_handle *handle)
{
        WARN_ON(!mutex_is_locked(&client->lock));
        return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
        int id;
        struct rb_node **p = &client->handles.rb_node;
        struct rb_node *parent = NULL;
        struct ion_handle *entry;

        id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
        if (id < 0)
                return id;

        handle->id = id;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_handle, node);

                if (handle->buffer < entry->buffer)
                        p = &(*p)->rb_left;
                else if (handle->buffer > entry->buffer)
                        p = &(*p)->rb_right;
                else
                        WARN(1, "%s: buffer already found.\n", __func__);
        }

        rb_link_node(&handle->node, parent, p);
        rb_insert_color(&handle->node, &client->handles);

        return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int heap_id_mask,
                             unsigned int flags)
{
        struct ion_handle *handle;
        struct ion_device *dev = client->dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
        int ret;

        pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
                 len, align, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order. If the heap type is supported by the client, and matches the
         * request of the caller allocate from it. Repeat until allocate has
         * succeeded or all heaps have been tried
         */
        len = PAGE_ALIGN(len);

        if (!len)
                return ERR_PTR(-EINVAL);

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, align, flags);
                if (!IS_ERR(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (buffer == NULL)
                return ERR_PTR(-ENODEV);

        if (IS_ERR(buffer))
                return ERR_CAST(buffer);

        handle = ion_handle_create(client, buffer);

        /*
         * ion_buffer_create will create a buffer with a ref_cnt of 1,
         * and ion_handle_create will take a second reference, drop one here
         */
        ion_buffer_put(buffer);

        if (IS_ERR(handle))
                return handle;

        mutex_lock(&client->lock);
        ret = ion_handle_add(client, handle);
        mutex_unlock(&client->lock);
        if (ret) {
                ion_handle_put(handle);
                handle = ERR_PTR(ret);
        }

        return handle;
}
EXPORT_SYMBOL(ion_alloc);

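/*
 * Usage sketch (editorial, hypothetical caller): an in-kernel user with
 * an ion_client typically allocates and later frees a buffer like this,
 * where heap_id_mask selects heaps by id, e.g. 1 << heap_id:
 *
 *      struct ion_handle *handle;
 *
 *      handle = ion_alloc(client, SZ_1M, PAGE_SIZE, heap_id_mask,
 *                         ION_FLAG_CACHED);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      ...
 *      ion_free(client, handle);
 */
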
static void ion_free_nolock(struct ion_client *client,
                            struct ion_handle *handle)
{
        if (!ion_handle_validate(client, handle)) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
                return;
        }
        ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
        BUG_ON(client != handle->client);

        mutex_lock(&client->lock);
        ion_free_nolock(client, handle);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (WARN_ONCE(vaddr == NULL,
                      "heap->ops->map_kernel should return ERR_PTR on error"))
                return ERR_PTR(-EINVAL);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;
        void *vaddr;

        if (handle->kmap_cnt) {
                handle->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = ion_buffer_kmap_get(buffer);
        if (IS_ERR(vaddr))
                return vaddr;
        handle->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;

        if (!handle->kmap_cnt) {
                WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
                return;
        }
        handle->kmap_cnt--;
        if (!handle->kmap_cnt)
                ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        void *vaddr;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_kernel.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;

        if (!handle->buffer->heap->ops->map_kernel) {
                pr_err("%s: map_kernel is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-ENODEV);
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_handle_kmap_get(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
        return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

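/*
 * Usage sketch (editorial): kernel mappings are reference counted per
 * handle, so every ion_map_kernel() must be paired with an
 * ion_unmap_kernel() on the same handle:
 *
 *      void *vaddr = ion_map_kernel(client, handle);
 *
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *      ...touch the buffer through vaddr...
 *      ion_unmap_kernel(client, handle);
 */
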
static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
        struct rb_node *node;
        struct ion_client *tmp;
        struct ion_device *dev;

        node = ion_root_client->rb_node;
        dev = container_of(ion_root_client, struct ion_device, clients);

        down_read(&dev->lock);
        while (node) {
                tmp = rb_entry(node, struct ion_client, node);
                if (client < tmp) {
                        node = node->rb_left;
                } else if (client > tmp) {
                        node = node->rb_right;
                } else {
                        up_read(&dev->lock);
                        return 1;
                }
        }

        up_read(&dev->lock);
        return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
        struct ion_client *client = s->private;
        struct rb_node *n;
        size_t sizes[ION_NUM_HEAP_IDS] = {0};
        const char *names[ION_NUM_HEAP_IDS] = {NULL};
        int i;

        mutex_lock(&debugfs_mutex);
        if (!is_client_alive(client)) {
                seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
                           client);
                mutex_unlock(&debugfs_mutex);
                return 0;
        }

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                unsigned int id = handle->buffer->heap->id;

                if (!names[id])
                        names[id] = handle->buffer->heap->name;
                sizes[id] += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
        mutex_unlock(&debugfs_mutex);

        seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
        for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
                if (!names[i])
                        continue;
                seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
        }
        return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
        .open = ion_debug_client_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
                                 const unsigned char *name)
{
        int serial = -1;
        struct rb_node *node;

        for (node = rb_first(root); node; node = rb_next(node)) {
                struct ion_client *client = rb_entry(node, struct ion_client,
                                                     node);

                if (strcmp(client->name, name))
                        continue;
                serial = max(serial, client->display_serial);
        }
        return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
                                     const char *name)
{
        struct ion_client *client;
        struct task_struct *task;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ion_client *entry;
        pid_t pid;

        if (!name) {
                pr_err("%s: Name cannot be null\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        pid = task_pid_nr(current->group_leader);
        /*
         * don't bother to store task struct for kernel threads,
         * they can't be killed anyway
         */
        if (current->group_leader->flags & PF_KTHREAD) {
                put_task_struct(current->group_leader);
                task = NULL;
        } else {
                task = current->group_leader;
        }
        task_unlock(current->group_leader);

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                goto err_put_task_struct;

        client->dev = dev;
        client->handles = RB_ROOT;
        idr_init(&client->idr);
        mutex_init(&client->lock);
        client->task = task;
        client->pid = pid;
        client->name = kstrdup(name, GFP_KERNEL);
        if (!client->name)
                goto err_free_client;

        down_write(&dev->lock);
        client->display_serial = ion_get_client_serial(&dev->clients, name);
        client->display_name = kasprintf(
                GFP_KERNEL, "%s-%d", name, client->display_serial);
        if (!client->display_name) {
                up_write(&dev->lock);
                goto err_free_client_name;
        }
        p = &dev->clients.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_client, node);

                if (client < entry)
                        p = &(*p)->rb_left;
                else if (client > entry)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&client->node, parent, p);
        rb_insert_color(&client->node, &dev->clients);

        client->debug_root = debugfs_create_file(client->display_name, 0664,
                                                 dev->clients_debug_root,
                                                 client, &debug_client_fops);
        if (!client->debug_root) {
                char buf[256], *path;

                path = dentry_path(dev->clients_debug_root, buf, 256);
                pr_err("Failed to create client debugfs at %s/%s\n",
                       path, client->display_name);
        }

        up_write(&dev->lock);

        return client;

err_free_client_name:
        kfree(client->name);
err_free_client:
        kfree(client);
err_put_task_struct:
        if (task)
                put_task_struct(current->group_leader);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

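/*
 * Lifecycle sketch (editorial): each logical user of ion creates one
 * client against the system's struct ion_device (called idev below for
 * illustration) and destroys it when done; destroying the client also
 * tears down any handles still attached to it:
 *
 *      struct ion_client *client;
 *
 *      client = ion_client_create(idev, "my-driver");
 *      if (IS_ERR(client))
 *              return PTR_ERR(client);
 *      ...allocate handles, map buffers...
 *      ion_client_destroy(client);
 */
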
void ion_client_destroy(struct ion_client *client)
{
        struct ion_device *dev = client->dev;
        struct rb_node *n;

        pr_debug("%s: %d\n", __func__, __LINE__);
        mutex_lock(&debugfs_mutex);
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                ion_handle_destroy(&handle->ref);
        }

        idr_destroy(&client->idr);

        down_write(&dev->lock);
        if (client->task)
                put_task_struct(client->task);
        rb_erase(&client->node, &dev->clients);
        debugfs_remove_recursive(client->debug_root);
        up_write(&dev->lock);

        kfree(client->display_name);
        kfree(client->name);
        kfree(client);
        mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct dma_buf *dmabuf = attachment->dmabuf;
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_sync_for_device(buffer, attachment->dev, direction);
        return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
                               size_t size, enum dma_data_direction dir)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        /*
         * This is not correct - sg_dma_address needs a dma_addr_t that is valid
         * for the targeted device, but this works on the currently targeted
         * hardware.
         */
        sg_dma_address(&sg) = page_to_phys(page);
        dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction dir)
{
        struct ion_vma_list *vma_list;
        int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        int i;

        pr_debug("%s: syncing for device %s\n", __func__,
                 dev ? dev_name(dev) : "null");

        if (!ion_buffer_fault_user_mappings(buffer))
                return;

        mutex_lock(&buffer->lock);
        for (i = 0; i < pages; i++) {
                struct page *page = buffer->pages[i];

                if (ion_buffer_page_is_dirty(page))
                        ion_pages_sync_for_device(dev, ion_buffer_page(page),
                                                  PAGE_SIZE, dir);

                ion_buffer_page_clean(buffer->pages + i);
        }
        list_for_each_entry(vma_list, &buffer->vmas, list) {
                struct vm_area_struct *vma = vma_list->vma;

                zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
                               NULL);
        }
        mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        unsigned long pfn;
        int ret;

        mutex_lock(&buffer->lock);
        ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
        BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

        pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
        mutex_unlock(&buffer->lock);
        if (ret)
                return VM_FAULT_ERROR;

        return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list;

        vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
        if (!vma_list)
                return;
        vma_list->vma = vma;
        mutex_lock(&buffer->lock);
        list_add(&vma_list->list, &buffer->vmas);
        mutex_unlock(&buffer->lock);
        pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list, *tmp;

        pr_debug("%s\n", __func__);
        mutex_lock(&buffer->lock);
        list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
                if (vma_list->vma != vma)
                        continue;
                list_del(&vma_list->list);
                kfree(vma_list);
                pr_debug("%s: deleting %p\n", __func__, vma);
                break;
        }
        mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
        .open = ion_vm_open,
        .close = ion_vm_close,
        .fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping to userspace\n",
                       __func__);
                return -EINVAL;
        }

        if (ion_buffer_fault_user_mappings(buffer)) {
                vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
                                 VM_DONTDUMP;
                vma->vm_private_data = buffer;
                vma->vm_ops = &ion_vma_ops;
                ion_vm_open(vma);
                return 0;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;

        return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;

        if (!buffer->heap->ops->map_kernel) {
                pr_err("%s: map kernel is not implemented by this heap.\n",
                       __func__);
                return -ENODEV;
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_buffer_kmap_get(buffer);
        mutex_unlock(&buffer->lock);
        return PTR_ERR_OR_ZERO(vaddr);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                      enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        ion_buffer_kmap_put(buffer);
        mutex_unlock(&buffer->lock);

        return 0;
}

static struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .kmap_atomic = ion_dma_buf_kmap,
        .kunmap_atomic = ion_dma_buf_kunmap,
        .kmap = ion_dma_buf_kmap,
        .kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
                                  struct ion_handle *handle)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct ion_buffer *buffer;
        struct dma_buf *dmabuf;
        bool valid_handle;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to share.\n", __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }
        buffer = handle->buffer;
        ion_buffer_get(buffer);
        mutex_unlock(&client->lock);

        exp_info.ops = &dma_buf_ops;
        exp_info.size = buffer->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buffer;

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                ion_buffer_put(buffer);
                return dmabuf;
        }

        return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
        struct dma_buf *dmabuf;
        int fd;

        dmabuf = ion_share_dma_buf(client, handle);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

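/**
 * ion_import_dma_buf_fd() - import a dma-buf file descriptor into a client
 * @client:	the client to attach the handle to
 * @fd:		a file descriptor referring to an ion-exported dma-buf
 *
 * Resolves @fd to its dma-buf, imports it, then drops the reference taken
 * by dma_buf_get(); the returned handle keeps the buffer alive.
 */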
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);

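/*
 * Sync an ion buffer's pages for device access by walking its whole
 * sg_table.  Buffers from other exporters are rejected, since only for
 * ion's own exports is dmabuf->priv known to be a struct ion_buffer.
 */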
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

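/*
 * Main ioctl dispatcher for /dev/ion.  The argument is staged through a
 * local union: copied in when the (fixed-up) direction includes
 * _IOC_WRITE and copied back out when it includes _IOC_READ.  If the
 * copy-out after ION_IOC_ALLOC fails, the freshly allocated handle is
 * freed again so userspace never owns a handle id it did not receive.
 */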
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client,
						     data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf_fd(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

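/* sum the sizes of a client's handles that live on the heap with @id */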
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

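/*
 * debugfs seq_file handler for a heap: prints per-client usage, then any
 * orphaned buffers (handle_count == 0) still pinned by kernel references,
 * followed by totals and, when deferred freeing is enabled, the size of
 * the heap's free list.
 */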
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

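/*
 * Writing to the <heap>_shrink debugfs file drives the heap shrinker by
 * hand: a non-zero value asks it to scan that many objects, while zero
 * means "count first, then scan everything currently reclaimable".
 */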
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

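/* Reading the <heap>_shrink file reports how many objects could be freed. */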
static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

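/**
 * ion_device_add_heap() - register a heap with an ion device
 * @dev:	the ion device
 * @heap:	the heap to add
 *
 * Initializes the heap's deferred-free list and shrinker when the heap's
 * flags and ops call for them, inserts the heap into the device's
 * priority list, and creates its debugfs entries.  Missing allocate/free
 * ops are only reported, not rejected.
 */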
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

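/**
 * ion_device_create() - allocate and register an ion device
 * @custom_ioctl:	optional handler backing ION_IOC_CUSTOM
 *
 * Registers the "ion" misc device and builds the debugfs hierarchy
 * (ion/heaps and ion/clients).  debugfs failures are logged but not
 * fatal.  Returns the device or an ERR_PTR on allocation or registration
 * failure.
 */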
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);

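/* tear down the misc device and its debugfs tree; heaps and clients are
 * not freed here (see the XXX below)
 */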
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);