/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the existing clients
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, kmap_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

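/*
 * The four helpers above track per-page dirty state for faulted buffers by
 * tagging bit 0 of each struct page pointer stored in buffer->pages; struct
 * page is always at least word aligned, so bit 0 of a real pointer is free
 * to use as a flag.  An illustrative (non-driver) sketch, where do_sync()
 * stands in for any flush routine:
 *
 *	struct page *page = buffer->pages[i];
 *	if (ion_buffer_page_is_dirty(page))
 *		do_sync(ion_buffer_page(page));
 *	ion_buffer_page_clean(&buffer->pages[i]);
 */
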
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, ie if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
						int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
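
/*
 * Sketch of a hypothetical in-kernel user of ion_alloc() and ion_free()
 * below; the heap id and flags are assumptions for illustration only:
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *			   1 << my_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */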

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
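
/*
 * A minimal sketch pairing the two exports above, assuming the buffer's
 * heap implements map_kernel:
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 *
 * Kernel mappings are refcounted both per handle and per buffer, so
 * repeated map/unmap pairs from multiple handles to the same buffer are
 * safe.
 */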

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

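/*
 * ion_pages_sync_for_device() is meant for heap implementations that need
 * to flush pages they are about to hand out.  An illustrative call, for a
 * hypothetical heap flushing a freshly allocated page block:
 *
 *	struct page *page = alloc_pages(gfp_mask, order);
 *
 *	if (page)
 *		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
 *					  DMA_BIDIRECTIONAL);
 */
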
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

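/*
 * Every buffer leaves ion wrapped in a dma-buf exported with the ops table
 * above.  A device driver consuming such a buffer would use the standard
 * dma-buf calls; a minimal sketch, where dev and fd are placeholders:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *att = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(att, DMA_TO_DEVICE);
 *	...
 *	dma_buf_unmap_attachment(att, sgt, DMA_TO_DEVICE);
 *	dma_buf_detach(dmabuf, att);
 *	dma_buf_put(dmabuf);
 */
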
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
						struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
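
/*
 * Sketch of handing an existing handle to userspace as a file descriptor:
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *
 *	if (fd < 0)
 *		return fd;
 *
 * The fd can then be passed to another process and mmap()ed there, or
 * imported back into a client with ion_import_dma_buf() below.
 */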

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
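
/*
 * The inverse of sharing: converting a received dma-buf fd back into a
 * client-local handle, e.g.:
 *
 *	struct ion_handle *handle = ion_import_dma_buf(client, fd);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 * Note from the lookup above that importing a buffer the client already
 * references returns the existing handle with its refcount raised rather
 * than creating a duplicate.
 */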

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

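/*
 * Example of the fixup above: ION_IOC_FREE only passes data into the
 * kernel, but (assuming the uapi header of this era) it is declared with
 * _IOWR(), so _IOC_DIR() would report both read and write.  Forcing the
 * direction to _IOC_WRITE lets ion_ioctl() below skip the needless
 * copy_to_user() on the way out.
 */
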
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001176static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1177{
1178 struct ion_client *client = filp->private_data;
Colin Crossdb866e32013-12-13 19:26:16 -08001179 struct ion_device *dev = client->dev;
1180 struct ion_handle *cleanup_handle = NULL;
1181 int ret = 0;
1182 unsigned int dir;
1183
1184 union {
1185 struct ion_fd_data fd;
1186 struct ion_allocation_data allocation;
1187 struct ion_handle_data handle;
1188 struct ion_custom_data custom;
1189 } data;
1190
1191 dir = ion_ioctl_dir(cmd);
1192
1193 if (_IOC_SIZE(cmd) > sizeof(data))
1194 return -EINVAL;
1195
1196 if (dir & _IOC_WRITE)
1197 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1198 return -EFAULT;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001199
1200 switch (cmd) {
1201 case ION_IOC_ALLOC:
1202 {
Colin Cross47b40452013-12-13 14:24:50 -08001203 struct ion_handle *handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001204
Colin Crossdb866e32013-12-13 19:26:16 -08001205 handle = ion_alloc(client, data.allocation.len,
1206 data.allocation.align,
1207 data.allocation.heap_id_mask,
1208 data.allocation.flags);
Colin Cross47b40452013-12-13 14:24:50 -08001209 if (IS_ERR(handle))
1210 return PTR_ERR(handle);
1211
Colin Crossdb866e32013-12-13 19:26:16 -08001212 data.allocation.handle = handle->id;
KyongHo Cho54ac07842013-12-13 14:23:39 -08001213
Colin Crossdb866e32013-12-13 19:26:16 -08001214 cleanup_handle = handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001215 break;
1216 }
1217 case ION_IOC_FREE:
1218 {
Colin Cross47b40452013-12-13 14:24:50 -08001219 struct ion_handle *handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001220
Colin Crossdb866e32013-12-13 19:26:16 -08001221 handle = ion_handle_get_by_id(client, data.handle.handle);
Colin Cross83271f62013-12-13 14:24:59 -08001222 if (IS_ERR(handle))
1223 return PTR_ERR(handle);
Colin Cross47b40452013-12-13 14:24:50 -08001224 ion_free(client, handle);
Colin Cross83271f62013-12-13 14:24:59 -08001225 ion_handle_put(handle);
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001226 break;
1227 }
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001228 case ION_IOC_SHARE:
Rebecca Schultz Zavindf0f6c72013-12-13 14:24:24 -08001229 case ION_IOC_MAP:
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001230 {
Colin Cross47b40452013-12-13 14:24:50 -08001231 struct ion_handle *handle;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001232
Colin Crossdb866e32013-12-13 19:26:16 -08001233 handle = ion_handle_get_by_id(client, data.handle.handle);
Colin Cross83271f62013-12-13 14:24:59 -08001234 if (IS_ERR(handle))
1235 return PTR_ERR(handle);
Colin Crossdb866e32013-12-13 19:26:16 -08001236 data.fd.fd = ion_share_dma_buf_fd(client, handle);
Colin Cross83271f62013-12-13 14:24:59 -08001237 ion_handle_put(handle);
Colin Crossdb866e32013-12-13 19:26:16 -08001238 if (data.fd.fd < 0)
1239 ret = data.fd.fd;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001240 break;
1241 }
1242 case ION_IOC_IMPORT:
1243 {
Colin Cross47b40452013-12-13 14:24:50 -08001244 struct ion_handle *handle;
Colin Crossdb866e32013-12-13 19:26:16 -08001245 handle = ion_import_dma_buf(client, data.fd.fd);
Colin Cross47b40452013-12-13 14:24:50 -08001246 if (IS_ERR(handle))
1247 ret = PTR_ERR(handle);
1248 else
Colin Crossdb866e32013-12-13 19:26:16 -08001249 data.handle.handle = handle->id;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001250 break;
1251 }
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001252 case ION_IOC_SYNC:
1253 {
Colin Crossdb866e32013-12-13 19:26:16 -08001254 ret = ion_sync_for_device(client, data.fd.fd);
Rebecca Schultz Zavin0b9ec1c2013-12-13 14:23:52 -08001255 break;
1256 }
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001257 case ION_IOC_CUSTOM:
1258 {
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001259 if (!dev->custom_ioctl)
1260 return -ENOTTY;
Colin Crossdb866e32013-12-13 19:26:16 -08001261 ret = dev->custom_ioctl(client, data.custom.cmd,
1262 data.custom.arg);
1263 break;
Rebecca Schultz Zavinc30707b2013-12-13 19:38:38 -08001264 }
1265 default:
1266 return -ENOTTY;
1267 }
Colin Crossdb866e32013-12-13 19:26:16 -08001268
	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

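/*
 * Called on the final close() of /dev/ion: destroy the client created in
 * ion_open() along with any handles it still holds.
 */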
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

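/*
 * Each open() of /dev/ion gets its own ion_client, so handles are scoped to
 * the file descriptor they were allocated on.
 */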
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

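/*
 * Sum the sizes of all of @client's handles whose buffers live in the heap
 * with the given @id; used by the per-heap debugfs accounting below.
 */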
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

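/*
 * seq_file handler for the per-heap debugfs file (typically under
 * /sys/kernel/debug/ion/): prints this heap's usage per client, then lists
 * buffers whose handle_count has dropped to zero -- "orphaned" allocations
 * kept alive only by a dma-buf fd or a kernel mapping.
 */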
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned", total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

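/*
 * When built with DEBUG_HEAP_SHRINKER, each heap that registers a shrinker
 * also gets a "<heap>_shrink" debugfs file: reading it asks the shrinker how
 * many objects it could free, and writing a non-zero value forces a scan of
 * that many objects.
 */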
#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

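/*
 * Validate a heap's mandatory ops and publish it on the device's priority
 * list so allocation requests can find it; also creates its debugfs entry.
 */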
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: cannot add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}

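/*
 * Allocate and register the ion misc device ("/dev/ion"), set up its debugfs
 * root, and initialize the buffer, heap, and client bookkeeping structures.
 */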
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

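/*
 * Unregister the misc device; heaps and clients are left alive (see the XXX
 * note below).
 */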
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients? */
	kfree(dev);
}

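/*
 * Early boot helper: carve out physically contiguous memory for each heap in
 * @data, allocating from memblock when no fixed base address was supplied.
 */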
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}