/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients attached to the device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, kmap_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};
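
/*
 * Note on reference counting, as implemented below: an ion_handle holds one
 * reference on its ion_buffer (taken in ion_handle_create() and dropped in
 * ion_handle_destroy()), and the handle itself is reference counted through
 * @ref so that id lookups and the owning client can share it safely.
 */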

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
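
/*
 * The helpers above stash a "dirty" flag in bit 0 of the struct page
 * pointers stored in buffer->pages; the pointers themselves are at least
 * word aligned, so that bit is otherwise always zero.  A stored pointer
 * must therefore be passed through ion_buffer_page() before use, e.g.:
 *
 *	struct page *page = ion_buffer_page(buffer->pages[i]);
 */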

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
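
/*
 * A handle ends up indexed twice: by id in client->idr (used by the ioctl
 * interface via ion_handle_get_by_id()) and by buffer address in
 * client->handles (used by ion_handle_lookup() when importing a dma-buf
 * the client already holds a handle for).
 */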

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
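
/*
 * Illustrative in-kernel use of the allocation API exported above (a
 * minimal sketch, not code from this driver; "idev" and "my_heap_id" are
 * placeholders, and the heap id mask depends on which heaps the platform
 * has registered):
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *			   1 << my_heap_id, ION_FLAG_CACHED);
 *	if (!IS_ERR(handle))
 *		ion_free(client, handle);
 *	ion_client_destroy(client);
 */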

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
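
/*
 * These ops back every dma-buf exported by ion_share_dma_buf() below, so a
 * file descriptor obtained through ION_IOC_SHARE can be attached, mmapped
 * and kmapped by other drivers via the standard dma-buf interface.
 */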

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
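
/*
 * Illustrative sharing round trip (a sketch only; "client_a", "client_b"
 * and "handle_a" are placeholders, and both clients are assumed to have
 * been created against the same ion device):
 *
 *	struct ion_handle *handle_b;
 *	int fd;
 *
 *	fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	if (fd >= 0)
 *		handle_b = ion_import_dma_buf(client_b, fd);
 */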

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
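
/*
 * ion_sync_for_device() above is the helper behind the ION_IOC_SYNC ioctl:
 * userspace passes the dma-buf fd of a cached allocation and the whole
 * sg_table is flushed for device access with dma_sync_sg_for_device().
 */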
1223
Colin Crossdb866e32013-12-13 19:26:16 -08001224/* fix up the cases where the ioctl direction bits are incorrect */
1225static unsigned int ion_ioctl_dir(unsigned int cmd)
1226{
1227 switch (cmd) {
1228 case ION_IOC_SYNC:
1229 case ION_IOC_FREE:
1230 case ION_IOC_CUSTOM:
1231 return _IOC_WRITE;
1232 default:
1233 return _IOC_DIR(cmd);
1234 }
1235}
1236
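/*
 * Main ioctl dispatcher for /dev/ion.  The argument struct is copied in
 * and out of the kernel according to the (fixed up) direction bits.  For
 * ION_IOC_ALLOC the new handle is remembered in cleanup_handle so the
 * allocation can be freed again if copying the result back to userspace
 * fails.
 */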
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001237static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1238{
1239 struct ion_client *client = filp->private_data;
Colin Crossdb866e32013-12-13 19:26:16 -08001240 struct ion_device *dev = client->dev;
1241 struct ion_handle *cleanup_handle = NULL;
1242 int ret = 0;
1243 unsigned int dir;
1244
1245 union {
1246 struct ion_fd_data fd;
1247 struct ion_allocation_data allocation;
1248 struct ion_handle_data handle;
1249 struct ion_custom_data custom;
1250 } data;
1251
1252 dir = ion_ioctl_dir(cmd);
1253
1254 if (_IOC_SIZE(cmd) > sizeof(data))
1255 return -EINVAL;
1256
1257 if (dir & _IOC_WRITE)
1258 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1259 return -EFAULT;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001260
1261 switch (cmd) {
1262 case ION_IOC_ALLOC:
1263 {
Colin Cross47b40452013-12-13 14:24:50 -08001264 struct ion_handle *handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001265
Colin Crossdb866e32013-12-13 19:26:16 -08001266 handle = ion_alloc(client, data.allocation.len,
1267 data.allocation.align,
1268 data.allocation.heap_id_mask,
1269 data.allocation.flags);
Colin Cross47b40452013-12-13 14:24:50 -08001270 if (IS_ERR(handle))
1271 return PTR_ERR(handle);
1272
Colin Crossdb866e32013-12-13 19:26:16 -08001273 data.allocation.handle = handle->id;
KyongHo Cho54ac07842013-12-13 14:23:39 -08001274
Colin Crossdb866e32013-12-13 19:26:16 -08001275 cleanup_handle = handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001276 break;
1277 }
1278 case ION_IOC_FREE:
1279 {
Colin Cross47b40452013-12-13 14:24:50 -08001280 struct ion_handle *handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001281
Colin Crossdb866e32013-12-13 19:26:16 -08001282 handle = ion_handle_get_by_id(client, data.handle.handle);
Colin Cross83271f62013-12-13 14:24:59 -08001283 if (IS_ERR(handle))
1284 return PTR_ERR(handle);
Colin Cross47b40452013-12-13 14:24:50 -08001285 ion_free(client, handle);
Colin Cross83271f62013-12-13 14:24:59 -08001286 ion_handle_put(handle);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001287 break;
1288 }
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001289 case ION_IOC_SHARE:
Rebecca Schultz Zavindf0f6c72013-12-13 14:24:24 -08001290 case ION_IOC_MAP:
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001291 {
Colin Cross47b40452013-12-13 14:24:50 -08001292 struct ion_handle *handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001293
Colin Crossdb866e32013-12-13 19:26:16 -08001294 handle = ion_handle_get_by_id(client, data.handle.handle);
Colin Cross83271f62013-12-13 14:24:59 -08001295 if (IS_ERR(handle))
1296 return PTR_ERR(handle);
Colin Crossdb866e32013-12-13 19:26:16 -08001297 data.fd.fd = ion_share_dma_buf_fd(client, handle);
Colin Cross83271f62013-12-13 14:24:59 -08001298 ion_handle_put(handle);
Colin Crossdb866e32013-12-13 19:26:16 -08001299 if (data.fd.fd < 0)
1300 ret = data.fd.fd;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001301 break;
1302 }
1303 case ION_IOC_IMPORT:
1304 {
Colin Cross47b40452013-12-13 14:24:50 -08001305 struct ion_handle *handle;
Seunghun Lee10f62862014-05-01 01:30:23 +09001306
Colin Crossdb866e32013-12-13 19:26:16 -08001307 handle = ion_import_dma_buf(client, data.fd.fd);
Colin Cross47b40452013-12-13 14:24:50 -08001308 if (IS_ERR(handle))
1309 ret = PTR_ERR(handle);
1310 else
Colin Crossdb866e32013-12-13 19:26:16 -08001311 data.handle.handle = handle->id;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001312 break;
1313 }
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001314 case ION_IOC_SYNC:
1315 {
Colin Crossdb866e32013-12-13 19:26:16 -08001316 ret = ion_sync_for_device(client, data.fd.fd);
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001317 break;
1318 }
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001319 case ION_IOC_CUSTOM:
1320 {
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001321 if (!dev->custom_ioctl)
1322 return -ENOTTY;
Colin Crossdb866e32013-12-13 19:26:16 -08001323 ret = dev->custom_ioctl(client, data.custom.cmd,
1324 data.custom.arg);
1325 break;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001326 }
1327 default:
1328 return -ENOTTY;
1329 }
Colin Crossdb866e32013-12-13 19:26:16 -08001330
1331 if (dir & _IOC_READ) {
1332 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1333 if (cleanup_handle)
1334 ion_free(client, cleanup_handle);
1335 return -EFAULT;
1336 }
1337 }
1338 return ret;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001339}
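
/*
 * Illustrative sketch only (not part of this driver): a userspace client
 * would typically drive the ioctls above roughly as below, using the uapi
 * definitions from ion.h.  ION_HEAP_SYSTEM_MASK stands in for whatever
 * heap_id_mask is appropriate on the platform; error handling is elided.
 *
 *	int ionfd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len		= 4096,
 *		.align		= 4096,
 *		.heap_id_mask	= ION_HEAP_SYSTEM_MASK,
 *		.flags		= 0,
 *	};
 *	struct ion_fd_data share;
 *	void *buf;
 *
 *	ioctl(ionfd, ION_IOC_ALLOC, &alloc);		allocate -> alloc.handle
 *	share.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_SHARE, &share);		export -> share.fd
 *	buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 */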
1340
1341static int ion_release(struct inode *inode, struct file *file)
1342{
1343 struct ion_client *client = file->private_data;
1344
1345 pr_debug("%s: %d\n", __func__, __LINE__);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001346 ion_client_destroy(client);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001347 return 0;
1348}
1349
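/*
 * Every open of /dev/ion creates a fresh ion_client, named after the
 * opener's thread-group pid for the debugfs listing; ion_release() above
 * destroys it again when the file is closed.
 */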
1350static int ion_open(struct inode *inode, struct file *file)
1351{
1352 struct miscdevice *miscdev = file->private_data;
1353 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1354 struct ion_client *client;
Laura Abbott483ed032014-02-17 13:58:35 -08001355 char debug_name[64];
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001356
1357 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbott483ed032014-02-17 13:58:35 -08001358 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1359 client = ion_client_create(dev, debug_name);
Colin Cross9e907652013-12-13 14:24:49 -08001360 if (IS_ERR(client))
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001361 return PTR_ERR(client);
1362 file->private_data = client;
1363
1364 return 0;
1365}
1366
1367static const struct file_operations ion_fops = {
1368 .owner = THIS_MODULE,
1369 .open = ion_open,
1370 .release = ion_release,
1371 .unlocked_ioctl = ion_ioctl,
Rom Lemarchand827c8492013-12-13 14:24:55 -08001372 .compat_ioctl = compat_ion_ioctl,
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001373};
1374
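/*
 * Sum of the sizes of all of @client's handles whose buffers live on the
 * heap with the given @id; helper for the per-heap debugfs file below.
 */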
1375static size_t ion_debug_heap_total(struct ion_client *client,
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001376 unsigned int id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001377{
1378 size_t size = 0;
1379 struct rb_node *n;
1380
1381 mutex_lock(&client->lock);
1382 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1383 struct ion_handle *handle = rb_entry(n,
1384 struct ion_handle,
1385 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001386 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001387 size += handle->buffer->size;
1388 }
1389 mutex_unlock(&client->lock);
1390 return size;
1391}
1392
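/*
 * Show routine for the per-heap debugfs file (ion/heaps/<heap name>):
 * prints each client's usage of this heap, then any orphaned buffers
 * (handle_count == 0) kept alive by outstanding dma-buf references,
 * followed by totals and, for deferred-free heaps, the free-list size.
 */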
1393static int ion_debug_heap_show(struct seq_file *s, void *unused)
1394{
1395 struct ion_heap *heap = s->private;
1396 struct ion_device *dev = heap->dev;
1397 struct rb_node *n;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001398 size_t total_size = 0;
1399 size_t total_orphaned_size = 0;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001400
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001401 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Iulia Manda164ad862014-03-11 20:12:29 +02001402 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001403
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001404 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001405 struct ion_client *client = rb_entry(n, struct ion_client,
1406 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001407 size_t size = ion_debug_heap_total(client, heap->id);
Seunghun Lee10f62862014-05-01 01:30:23 +09001408
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001409 if (!size)
1410 continue;
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001411 if (client->task) {
1412 char task_comm[TASK_COMM_LEN];
1413
1414 get_task_comm(task_comm, client->task);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001415 seq_printf(s, "%16s %16u %16zu\n", task_comm,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001416 client->pid, size);
1417 } else {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001418 seq_printf(s, "%16s %16u %16zu\n", client->name,
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001419 client->pid, size);
1420 }
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001421 }
Iulia Manda164ad862014-03-11 20:12:29 +02001422 seq_puts(s, "----------------------------------------------------\n");
1423 seq_puts(s, "orphaned allocations (info is from last known client):\n");
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001424 mutex_lock(&dev->buffer_lock);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001425 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1426 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1427 node);
Rebecca Schultz Zavin2bb9f502013-12-13 14:24:30 -08001428 if (buffer->heap->id != heap->id)
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001429 continue;
1430 total_size += buffer->size;
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001431 if (!buffer->handle_count) {
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001432 seq_printf(s, "%16s %16u %16zu %d %d\n",
Colin Crosse61fc912013-12-13 19:26:14 -08001433 buffer->task_comm, buffer->pid,
1434 buffer->size, buffer->kmap_cnt,
Benjamin Gaignard092c3542013-12-13 14:24:22 -08001435 atomic_read(&buffer->ref.refcount));
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001436 total_orphaned_size += buffer->size;
1437 }
1438 }
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001439 mutex_unlock(&dev->buffer_lock);
Iulia Manda164ad862014-03-11 20:12:29 +02001440 seq_puts(s, "----------------------------------------------------\n");
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001441 seq_printf(s, "%16s %16zu\n", "total orphaned",
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001442 total_orphaned_size);
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001443 seq_printf(s, "%16s %16zu\n", "total ", total_size);
Colin Cross2540c732013-12-13 14:24:47 -08001444 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
Rasmus Villemoesb5693962015-02-20 14:13:19 +01001445 seq_printf(s, "%16s %16zu\n", "deferred free",
Colin Cross2540c732013-12-13 14:24:47 -08001446 heap->free_list_size);
Iulia Manda164ad862014-03-11 20:12:29 +02001447 seq_puts(s, "----------------------------------------------------\n");
Rebecca Schultz Zavin45b17a82013-12-13 14:24:11 -08001448
1449 if (heap->debug_show)
1450 heap->debug_show(heap, s, unused);
Rebecca Schultz Zavin5ad7bc32013-12-13 14:24:03 -08001451
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001452 return 0;
1453}
1454
1455static int ion_debug_heap_open(struct inode *inode, struct file *file)
1456{
1457 return single_open(file, ion_debug_heap_show, inode->i_private);
1458}
1459
1460static const struct file_operations debug_heap_fops = {
1461 .open = ion_debug_heap_open,
1462 .read = seq_read,
1463 .llseek = seq_lseek,
1464 .release = single_release,
1465};
1466
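/*
 * Handlers behind the per-heap "<heap name>_shrink" debugfs file: writing
 * a count scans that many objects through the heap's shrinker (writing 0
 * first counts and then drains everything), reading reports how many
 * objects the shrinker could currently free.  Typically exercised with
 * something like "echo 0 > /sys/kernel/debug/ion/heaps/<heap>_shrink".
 */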
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001467static int debug_shrink_set(void *data, u64 val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001468{
John Stultze1d855b2013-12-13 19:26:33 -08001469 struct ion_heap *heap = data;
1470 struct shrink_control sc;
1471 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001472
John Stultze1d855b2013-12-13 19:26:33 -08001473 sc.gfp_mask = -1;
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001474 sc.nr_to_scan = val;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001475
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001476 if (!val) {
1477 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1478 sc.nr_to_scan = objs;
1479 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001480
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001481 heap->shrinker.scan_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08001482 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001483}
1484
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001485static int debug_shrink_get(void *data, u64 *val)
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001486{
John Stultze1d855b2013-12-13 19:26:33 -08001487 struct ion_heap *heap = data;
1488 struct shrink_control sc;
1489 int objs;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001490
John Stultze1d855b2013-12-13 19:26:33 -08001491 sc.gfp_mask = -1;
1492 sc.nr_to_scan = 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001493
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001494 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
John Stultze1d855b2013-12-13 19:26:33 -08001495 *val = objs;
1496 return 0;
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001497}
1498
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001499DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
John Stultze1d855b2013-12-13 19:26:33 -08001500 debug_shrink_set, "%llu\n");
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001501
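/**
 * ion_device_add_heap() - register a heap with an ion device
 * @dev:	the ion device
 * @heap:	the heap to add
 *
 * Complains (but carries on) if the heap lacks the mandatory allocate,
 * free, map_dma or unmap_dma ops, sets up deferred freeing and a shrinker
 * when the heap asks for them, inserts the heap into the device's priority
 * list and creates its debugfs entries.
 */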
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001502void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1503{
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001504 struct dentry *debug_file;
1505
Rebecca Schultz Zavin29ae6bc2013-12-13 14:23:43 -08001506 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1507 !heap->ops->unmap_dma)
 1508		pr_err("%s: cannot add heap with invalid ops struct.\n",
1509 __func__);
1510
Mitchel Humpherys95e53dd2015-01-08 17:24:27 -08001511 spin_lock_init(&heap->free_lock);
1512 heap->free_list_size = 0;
1513
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001514 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1515 ion_heap_init_deferred_free(heap);
Rebecca Schultz Zavinfe2faea2013-12-13 14:24:35 -08001516
Colin Crossb9daf0b2014-02-17 13:58:38 -08001517 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1518 ion_heap_init_shrinker(heap);
1519
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001520 heap->dev = dev;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001521 down_write(&dev->lock);
Sriram Raghunathan7e416172015-09-22 22:35:51 +05301522 /*
1523 * use negative heap->id to reverse the priority -- when traversing
 1524	 * the list later, attempt higher id numbers first
1525 */
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08001526 plist_node_init(&heap->node, -heap->id);
1527 plist_add(&heap->node, &dev->heaps);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001528 debug_file = debugfs_create_file(heap->name, 0664,
1529 dev->heaps_debug_root, heap,
1530 &debug_heap_fops);
1531
1532 if (!debug_file) {
1533 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09001534
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001535 path = dentry_path(dev->heaps_debug_root, buf, 256);
1536 pr_err("Failed to create heap debugfs at %s/%s\n",
1537 path, heap->name);
1538 }
1539
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001540 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001541 char debug_name[64];
1542
1543 snprintf(debug_name, 64, "%s_shrink", heap->name);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001544 debug_file = debugfs_create_file(
1545 debug_name, 0644, dev->heaps_debug_root, heap,
1546 &debug_shrink_fops);
1547 if (!debug_file) {
1548 char buf[256], *path;
Seunghun Lee10f62862014-05-01 01:30:23 +09001549
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001550 path = dentry_path(dev->heaps_debug_root, buf, 256);
1551 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1552 path, debug_name);
1553 }
Rebecca Schultz Zavinea313b52013-12-13 14:24:39 -08001554 }
Gioh Kimaeb7fa72015-07-06 15:14:41 +09001555
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001556 up_write(&dev->lock);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001557}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04001558EXPORT_SYMBOL(ion_device_add_heap);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001559
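/*
 * Create and register the ion misc device (/dev/ion), set up the debugfs
 * hierarchy (ion/heaps and ion/clients) and initialise the buffer tree,
 * heap plist and client tree.  @custom_ioctl may be NULL, in which case
 * ION_IOC_CUSTOM simply returns -ENOTTY.
 */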
1560struct ion_device *ion_device_create(long (*custom_ioctl)
1561 (struct ion_client *client,
1562 unsigned int cmd,
1563 unsigned long arg))
1564{
1565 struct ion_device *idev;
1566 int ret;
1567
1568 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1569 if (!idev)
1570 return ERR_PTR(-ENOMEM);
1571
1572 idev->dev.minor = MISC_DYNAMIC_MINOR;
1573 idev->dev.name = "ion";
1574 idev->dev.fops = &ion_fops;
1575 idev->dev.parent = NULL;
1576 ret = misc_register(&idev->dev);
1577 if (ret) {
1578 pr_err("ion: failed to register misc device.\n");
Shailendra Verma283d9302015-05-19 20:29:00 +05301579 kfree(idev);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001580 return ERR_PTR(ret);
1581 }
1582
1583 idev->debug_root = debugfs_create_dir("ion", NULL);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001584 if (!idev->debug_root) {
1585 pr_err("ion: failed to create debugfs root directory.\n");
1586 goto debugfs_done;
1587 }
1588 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1589 if (!idev->heaps_debug_root) {
1590 pr_err("ion: failed to create debugfs heaps directory.\n");
1591 goto debugfs_done;
1592 }
1593 idev->clients_debug_root = debugfs_create_dir("clients",
1594 idev->debug_root);
1595 if (!idev->clients_debug_root)
1596 pr_err("ion: failed to create debugfs clients directory.\n");
1597
1598debugfs_done:
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001599
1600 idev->custom_ioctl = custom_ioctl;
1601 idev->buffers = RB_ROOT;
Rebecca Schultz Zavin8d7ab9a2013-12-13 14:24:16 -08001602 mutex_init(&idev->buffer_lock);
1603 init_rwsem(&idev->lock);
Rebecca Schultz Zavincd694882013-12-13 14:24:25 -08001604 plist_head_init(&idev->heaps);
Rebecca Schultz Zavinb892bf72013-12-13 14:23:40 -08001605 idev->clients = RB_ROOT;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001606 return idev;
1607}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04001608EXPORT_SYMBOL(ion_device_create);
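
/*
 * Illustrative sketch only (not part of this file): a platform-specific
 * ion driver typically glues the device and its heaps together roughly as
 * below.  ion_heap_create() is assumed to come from ion_priv.h and the
 * heap descriptions from platform data; error handling is elided.
 *
 *	static struct ion_device *idev;
 *
 *	static int my_ion_probe(struct platform_device *pdev)
 *	{
 *		struct ion_platform_data *pdata = pdev->dev.platform_data;
 *		int i;
 *
 *		idev = ion_device_create(NULL);
 *		if (IS_ERR(idev))
 *			return PTR_ERR(idev);
 *		for (i = 0; i < pdata->nr; i++)
 *			ion_device_add_heap(idev,
 *					    ion_heap_create(&pdata->heaps[i]));
 *		return 0;
 *	}
 */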
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001609
1610void ion_device_destroy(struct ion_device *dev)
1611{
1612 misc_deregister(&dev->dev);
Mitchel Humpherysb08585fb2014-02-17 13:58:34 -08001613 debugfs_remove_recursive(dev->debug_root);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001614 /* XXX need to free the heaps and clients ? */
1615 kfree(dev);
1616}
Paul Gortmaker8c6c4632015-10-13 16:46:53 -04001617EXPORT_SYMBOL(ion_device_destroy);
Rebecca Schultz Zavin2991b7a2013-12-13 14:23:38 -08001618
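/*
 * Reserve the physical memory described in the platform data early in
 * boot: heaps with a fixed base get a memblock_reserve(), heaps with
 * base == 0 have their backing memory allocated from memblock here.
 */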
1619void __init ion_reserve(struct ion_platform_data *data)
1620{
Rebecca Schultz Zavinfa9bba52013-12-13 14:24:23 -08001621 int i;
Rebecca Schultz Zavin2991b7a2013-12-13 14:23:38 -08001622
1623 for (i = 0; i < data->nr; i++) {
1624 if (data->heaps[i].size == 0)
1625 continue;
Rebecca Schultz Zavinfa9bba52013-12-13 14:24:23 -08001626
1627 if (data->heaps[i].base == 0) {
1628 phys_addr_t paddr;
Seunghun Lee10f62862014-05-01 01:30:23 +09001629
Rebecca Schultz Zavinfa9bba52013-12-13 14:24:23 -08001630 paddr = memblock_alloc_base(data->heaps[i].size,
1631 data->heaps[i].align,
1632 MEMBLOCK_ALLOC_ANYWHERE);
1633 if (!paddr) {
Daeseok Youn51108982014-02-10 20:16:50 +09001634 pr_err("%s: error allocating memblock for heap %d\n",
Rebecca Schultz Zavinfa9bba52013-12-13 14:24:23 -08001635 __func__, i);
1636 continue;
1637 }
1638 data->heaps[i].base = paddr;
1639 } else {
1640 int ret = memblock_reserve(data->heaps[i].base,
1641 data->heaps[i].size);
1642 if (ret)
Colin Crosse61fc912013-12-13 19:26:14 -08001643 pr_err("memblock reserve of %zx@%lx failed\n",
Rebecca Schultz Zavinfa9bba52013-12-13 14:24:23 -08001644 data->heaps[i].size,
1645 data->heaps[i].base);
1646 }
Colin Crosse61fc912013-12-13 19:26:14 -08001647 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
Rebecca Schultz Zavinfa9bba52013-12-13 14:24:23 -08001648 data->heaps[i].name,
1649 data->heaps[i].base,
1650 data->heaps[i].size);
Rebecca Schultz Zavin2991b7a2013-12-13 14:23:38 -08001651 }
1652}