/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	driver-specific handler invoked for ION_IOC_CUSTOM
 * @user_clients:	list of all the clients created from userspace
 * @kernel_clients:	list of all the clients created from the kernel
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @ref:		for reference counting the client
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 * @usermap_cnt:	count of times this client has mapped for userspace
 * @iommu_map_cnt:	count of times this client has mapped for iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
	unsigned int iommu_map_cnt;
};

static void ion_iommu_release(struct kref *kref);

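/*
 * A buffer's cache flags are fixed by whichever mapping happens first;
 * any later kernel, dma, user or iommu mapping must request the same
 * flags, otherwise the map call is rejected.
 */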
static int ion_validate_buffer_flags(struct ion_buffer *buffer,
					unsigned long flags)
{
	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
		buffer->iommu_map_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
				" cannot map with flags %lx\n", __func__,
				buffer->flags, flags);
			return 1;
		}

	} else {
		buffer->flags = flags;
	}
	return 0;
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

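	/* the buffer tree is ordered by the buffers' kernel addresses */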
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d"
				" and partition %d\n", __func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);

}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
						unsigned int domain_no,
						unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
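	/* pack the domain number (upper 32 bits) and the partition number
	 * (lower 32 bits) into the 64-bit lookup key */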
	uint64_t key = domain_no;
	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

/* Client lock must be locked when calling */
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	WARN_ON(handle->kmap_cnt || handle->dmap_cnt || handle->usermap_cnt);
	ion_buffer_put(handle->buffer);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &handle->client->handles);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & flags))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer)) {
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);

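/*
 * Per-handle and per-buffer map counts are tracked together: *handle_cnt
 * counts how many times this handle has mapped the buffer, *buffer_cnt
 * counts how many handles currently hold a mapping.  _ion_map returns
 * true only for the first mapping of the buffer, i.e. when the caller
 * must perform the actual heap map; _ion_unmap returns true when the
 * last mapping goes away and the caller must perform the real unmap.
 */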
static bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	if (*buffer_cnt)
		map = false;
	else
		map = true;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
			unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		vaddr = ERR_PTR(-EEXIST);
		goto out;
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
							flags);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (IS_ERR_OR_NULL(iommu_map)) {
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
		} else {
			iommu_map->flags = iommu_flags;

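			/* a delayed-unmap mapping takes an extra reference so
			 * it survives until the buffer itself is freed */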
			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
				" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
			domain_num, partition_num, buffer);
		goto out;
	}

	_ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
	kref_put(&iommu_map->ref, ion_iommu_release);

out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);

}
EXPORT_SYMBOL(ion_unmap_iommu);

struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		sglist = ERR_PTR(-EEXIST);
		goto out;
	}

	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}
EXPORT_SYMBOL(ion_map_dma);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sglist = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_dma);

struct ion_buffer *ion_share(struct ion_client *client,
				 struct ion_handle *handle)
{
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* do not take an extra reference here, the burden is on the caller
	 * to make sure the buffer doesn't go away while it's passing it
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}
EXPORT_SYMBOL(ion_share);

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}
EXPORT_SYMBOL(ion_import);

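/*
 * Returns 0 only when the whole [start, end) range falls within a single
 * VMA of the current task; anything else is treated as out of bounds.
 */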
static int check_vaddr_bounds(unsigned long start, unsigned long end)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	int ret = 1;

	if (end < start)
		goto out;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			goto out_up;
		if (end > vma->vm_end)
			goto out_up;
		ret = 0;
	}

out_up:
	up_read(&mm->mmap_sem);
out:
	return ret;
}

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}


	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;

}

static const struct file_operations ion_share_fops;

struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("%s: imported fd not found in file table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("%s: imported file %s is not a shared ion"
			" file.", __func__, file->f_dentry->d_name.name);
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}
EXPORT_SYMBOL(ion_import_fd);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
				imap->domain_info[DI_DOMAIN_NUM],
				imap->domain_info[DI_PARTITION_NUM],
				imap->iova_addr);
		}
		seq_printf(s, "\n");
	}

	seq_printf(s, "%16.16s %d\n", "client refcount:",
			atomic_read(&client->ref.refcount));
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}


	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client->name);
	kfree(client);
}

static void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}

static int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}

void ion_client_destroy(struct ion_client *client)
{
	if (client)
		ion_client_put(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

static int ion_share_release(struct inode *inode, struct file* file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{

	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	ion_handle_get(handle);
	mutex_lock(&buffer->lock);
	buffer->umap_cnt++;
	mutex_unlock(&buffer->lock);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	mutex_lock(&buffer->lock);
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);


	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	mutex_lock(&client->lock);
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
	ion_client_put(client);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;
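	/* a share fd opened with O_DSYNC requests an uncached userspace
	 * mapping; otherwise the mapping is cached */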
	unsigned long flags = file->f_flags & O_DSYNC ?
			ION_SET_CACHE(UNCACHED) :
			ION_SET_CACHE(CACHED);


	pr_debug("%s: %d\n", __func__, __LINE__);
	/* make sure the client still exists, it's possible for the client to
	   have gone away but the map/share fd still to be around, take
	   a reference to it so it can't go away while this mapping exists */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: trying to mmap an ion handle in a process with no "
		       "ion client\n", __func__);
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				     buffer->size)) {
		pr_err("%s: trying to map larger area than handle has available"
		       "\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);

	if (ion_validate_buffer_flags(buffer, flags)) {
		ret = -EEXIST;
		mutex_unlock(&buffer->lock);
		goto err1;
	}

	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
						flags);

	buffer->umap_cnt++;
	if (ret) {
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
		goto err2;
	}
	mutex_unlock(&buffer->lock);

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err2:
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);
	/* drop the reference to the handle */
err1:
	mutex_lock(&client->lock);
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner = THIS_MODULE,
	.release = ion_share_release,
	.mmap = ion_share_mmap,
};

static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return -ENFILE;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;

	if (parent->f_flags & O_DSYNC)
		file->f_flags |= O_DSYNC;

	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.flags);

		if (IS_ERR_OR_NULL(data.handle))
			return -ENOMEM;

		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			pr_err("%s: invalid handle passed to share ioctl.\n",
			       __func__);
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	{
		struct ion_flush_data data;
		unsigned long start, end;
		struct ion_handle *handle = NULL;
		int ret;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_flush_data)))
			return -EFAULT;

		start = (unsigned long) data.vaddr;
		end = (unsigned long) data.vaddr + data.length;

		if (check_vaddr_bounds(start, end)) {
			pr_err("%s: virtual address %p is out of bounds\n",
				__func__, data.vaddr);
			return -EINVAL;
		}

		if (!data.handle) {
			handle = ion_import_fd(client, data.fd);
1585 if (IS_ERR_OR_NULL(handle)) {
1586 pr_info("%s: Could not import handle: %d\n",
1587 __func__, (int)handle);
1588 return -EINVAL;
1589 }
1590 }
1591
1592 ret = ion_do_cache_op(client,
1593 data.handle ? data.handle : handle,
1594 data.vaddr, data.offset, data.length,
1595 cmd);
1596
1597 if (!data.handle)
1598 ion_free(client, handle);
1599
Olav Haugand7baec02012-05-15 14:38:09 -07001600 if (ret < 0)
1601 return ret;
Laura Abbotte80ea012011-11-18 18:36:47 -08001602 break;
Laura Abbottabcb6f72011-10-04 16:26:49 -07001603
1604 }
Laura Abbott273dd8e2011-10-12 14:26:33 -07001605 case ION_IOC_GET_FLAGS:
1606 {
1607 struct ion_flag_data data;
1608 int ret;
1609 if (copy_from_user(&data, (void __user *)arg,
1610 sizeof(struct ion_flag_data)))
1611 return -EFAULT;
1612
1613 ret = ion_handle_get_flags(client, data.handle, &data.flags);
1614 if (ret < 0)
1615 return ret;
1616 if (copy_to_user((void __user *)arg, &data,
1617 sizeof(struct ion_flag_data)))
1618 return -EFAULT;
1619 break;
1620 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001621 default:
1622 return -ENOTTY;
1623 }
1624 return 0;
1625}
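
/*
 * Illustrative sketch (assumption, not part of the driver): a typical
 * userspace round trip through the ioctls above, with error handling
 * elided.  ion_fd is assumed to be an open descriptor on /dev/ion, and
 * the heap mask is platform specific (ION_SF_HEAP_ID is only an example):
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.flags = ION_HEAP(ION_SF_HEAP_ID),
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	// share.fd now mmap-able
 *	free_data.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */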
1626
1627static int ion_release(struct inode *inode, struct file *file)
1628{
1629 struct ion_client *client = file->private_data;
1630
1631 pr_debug("%s: %d\n", __func__, __LINE__);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001632 ion_client_put(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001633 return 0;
1634}
1635
1636static int ion_open(struct inode *inode, struct file *file)
1637{
1638 struct miscdevice *miscdev = file->private_data;
1639 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1640 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001641 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001642
1643 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001644 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1645 client = ion_client_create(dev, -1, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001646 if (IS_ERR_OR_NULL(client))
1647 return PTR_ERR(client);
1648 file->private_data = client;
1649
1650 return 0;
1651}
1652
1653static const struct file_operations ion_fops = {
1654 .owner = THIS_MODULE,
1655 .open = ion_open,
1656 .release = ion_release,
1657 .unlocked_ioctl = ion_ioctl,
1658};
1659
1660static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001661 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001662{
1663 size_t size = 0;
1664 struct rb_node *n;
1665
1666 mutex_lock(&client->lock);
1667 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1668 struct ion_handle *handle = rb_entry(n,
1669 struct ion_handle,
1670 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001671 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001672 size += handle->buffer->size;
1673 }
1674 mutex_unlock(&client->lock);
1675 return size;
1676}
1677
Olav Haugan0671b9a2012-05-25 11:58:56 -07001678/**
1679 * Searches through a client's handles to determine whether the buffer is owned
1680 * by this client. Used for debug output.
1681 * @param client pointer to candidate owner of buffer
1682 * @param buf pointer to buffer that we are trying to find the owner of
1683 * @return 1 if found, 0 otherwise
1684 */
1685static int ion_debug_find_buffer_owner(const struct ion_client *client,
1686 const struct ion_buffer *buf)
1687{
1688 struct rb_node *n;
1689
1690 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1691 const struct ion_handle *handle = rb_entry(n,
1692 const struct ion_handle,
1693 node);
1694 if (handle->buffer == buf)
1695 return 1;
1696 }
1697 return 0;
1698}
1699
1700/**
1701 * Adds a mem_map_data entry to the mem_map tree, keyed by buffer address.
1702 * Used for debug output.
1703 * @param mem_map The mem_map tree
1704 * @param data The new data to add to the tree
1705 */
1706static void ion_debug_mem_map_add(struct rb_root *mem_map,
1707 struct mem_map_data *data)
1708{
1709 struct rb_node **p = &mem_map->rb_node;
1710 struct rb_node *parent = NULL;
1711 struct mem_map_data *entry;
1712
1713 while (*p) {
1714 parent = *p;
1715 entry = rb_entry(parent, struct mem_map_data, node);
1716
1717 if (data->addr < entry->addr) {
1718 p = &(*p)->rb_left;
1719 } else if (data->addr > entry->addr) {
1720 p = &(*p)->rb_right;
1721 } else {
1722 pr_err("%s: mem_map_data already found.\n", __func__);
1723 BUG();
1724 }
1725 }
1726 rb_link_node(&data->node, parent, p);
1727 rb_insert_color(&data->node, mem_map);
1728}
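
/*
 * Sketch of how the resulting tree is typically consumed inside a heap's
 * print_debug implementation (an assumption about those implementations,
 * shown here only to document the data structure): rb_first()/rb_next()
 * walk the entries in ascending address order.  Format specifiers assume
 * addr/addr_end are unsigned long and size is size_t:
 *
 *	struct rb_node *n;
 *	for (n = rb_first(mem_map); n; n = rb_next(n)) {
 *		struct mem_map_data *md =
 *			rb_entry(n, struct mem_map_data, node);
 *		seq_printf(s, "%16s %lx--%lx (%zu bytes)\n",
 *			   md->client_name ? md->client_name : "(none)",
 *			   md->addr, md->addr_end, md->size);
 *	}
 */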
1729
1730/**
1731 * Search for an owner of a buffer by iterating over all ION clients.
1732 * @param dev ion device containing pointers to all the clients.
1733 * @param buffer pointer to buffer we are trying to find the owner of.
1734 * @return name of owner.
1735 */
1736const char *ion_debug_locate_owner(const struct ion_device *dev,
1737 const struct ion_buffer *buffer)
1738{
1739 struct rb_node *j;
1740 const char *client_name = NULL;
1741
1742 for (j = rb_first(&dev->user_clients); j && !client_name;
1743 j = rb_next(j)) {
1744 struct ion_client *client = rb_entry(j, struct ion_client,
1745 node);
1746 if (ion_debug_find_buffer_owner(client, buffer))
1747 client_name = client->name;
1748 }
1749 for (j = rb_first(&dev->kernel_clients); j && !client_name;
1750 j = rb_next(j)) {
1751 struct ion_client *client = rb_entry(j, struct ion_client,
1752 node);
1753 if (ion_debug_find_buffer_owner(client, buffer))
1754 client_name = client->name;
1755 }
1756 return client_name;
1757}
1758
1759/**
1760 * Create a mem_map of the heap.
1761 * @param s seq_file to log error message to.
1762 * @param heap The heap to create mem_map for.
1763 * @param mem_map The mem map to be created.
1764 */
1765void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1766 struct rb_root *mem_map)
1767{
1768 struct ion_device *dev = heap->dev;
1769 struct rb_node *n;
1770
1771 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1772 struct ion_buffer *buffer =
1773 rb_entry(n, struct ion_buffer, node);
1774 if (buffer->heap->id == heap->id) {
1775 struct mem_map_data *data =
1776 kzalloc(sizeof(*data), GFP_KERNEL);
1777 if (!data) {
1778 seq_printf(s, "ERROR: out of memory. "
1779 "Part of memory map will not be logged\n");
1780 break;
1781 }
1782 data->addr = buffer->priv_phys;
1783 data->addr_end = buffer->priv_phys + buffer->size - 1;
1784 data->size = buffer->size;
1785 data->client_name = ion_debug_locate_owner(dev, buffer);
1786 ion_debug_mem_map_add(mem_map, data);
1787 }
1788 }
1789}
1790
1791/**
1792 * Free the memory allocated by ion_debug_mem_map_create
1793 * @param mem_map The mem map to free.
1794 */
1795static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1796{
1797 if (mem_map) {
1798 struct rb_node *n;
1799 while ((n = rb_first(mem_map)) != 0) {
1800 struct mem_map_data *data =
1801 rb_entry(n, struct mem_map_data, node);
1802 rb_erase(&data->node, mem_map);
1803 kfree(data);
1804 }
1805 }
1806}
1807
1808/**
1809 * Print heap debug information.
1810 * @param s seq_file to log message to.
1811 * @param heap pointer to heap that we will print debug information for.
1812 */
1813static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1814{
1815 if (heap->ops->print_debug) {
1816 struct rb_root mem_map = RB_ROOT;
1817 ion_debug_mem_map_create(s, heap, &mem_map);
1818 heap->ops->print_debug(heap, s, &mem_map);
1819 ion_debug_mem_map_destroy(&mem_map);
1820 }
1821}
1822
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001823static int ion_debug_heap_show(struct seq_file *s, void *unused)
1824{
1825 struct ion_heap *heap = s->private;
1826 struct ion_device *dev = heap->dev;
1827 struct rb_node *n;
1828
Olav Haugane4900b52012-05-25 11:58:03 -07001829 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001830 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001831 for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001832 struct ion_client *client = rb_entry(n, struct ion_client,
1833 node);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001834 char task_comm[TASK_COMM_LEN];
Laura Abbott3647ac32011-10-31 14:09:53 -07001835 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001836 if (!size)
1837 continue;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001838
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001839 get_task_comm(task_comm, client->task);
Laura Abbott8747bbe2011-10-31 14:18:13 -07001840 seq_printf(s, "%16s %16u %16x\n", task_comm, client->pid,
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001841 size);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001842 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001843
1844 for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
1845 struct ion_client *client = rb_entry(n, struct ion_client,
1846 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001847 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001848 if (!size)
1849 continue;
Laura Abbott8747bbe2011-10-31 14:18:13 -07001850 seq_printf(s, "%16s %16u %16x\n", client->name, client->pid,
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001851 size);
1852 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001853 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001854 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001855 return 0;
1856}
1857
1858static int ion_debug_heap_open(struct inode *inode, struct file *file)
1859{
1860 return single_open(file, ion_debug_heap_show, inode->i_private);
1861}
1862
1863static const struct file_operations debug_heap_fops = {
1864 .open = ion_debug_heap_open,
1865 .read = seq_read,
1866 .llseek = seq_lseek,
1867 .release = single_release,
1868};
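
/*
 * Usage note (illustrative): each heap registered with
 * ion_device_add_heap() below gets a read-only debugfs file served by
 * ion_debug_heap_show(), under the "ion" directory created in
 * ion_device_create().  A minimal userspace reader, assuming debugfs is
 * mounted at /sys/kernel/debug and "example_heap" stands in for a real
 * heap name:
 *
 *	char buf[4096];
 *	int fd = open("/sys/kernel/debug/ion/example_heap", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *	if (n > 0) {
 *		buf[n] = '\0';
 *		printf("%s", buf);
 *	}
 *	close(fd);
 */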
1869
1870void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1871{
1872 struct rb_node **p = &dev->heaps.rb_node;
1873 struct rb_node *parent = NULL;
1874 struct ion_heap *entry;
1875
1876 heap->dev = dev;
1877 mutex_lock(&dev->lock);
1878 while (*p) {
1879 parent = *p;
1880 entry = rb_entry(parent, struct ion_heap, node);
1881
1882 if (heap->id < entry->id) {
1883 p = &(*p)->rb_left;
1884 } else if (heap->id > entry->id) {
1885 p = &(*p)->rb_right;
1886 } else {
1887 pr_err("%s: cannot insert multiple heaps with "
1888 "id %d\n", __func__, heap->id);
1889 goto end;
1890 }
1891 }
1892
1893 rb_link_node(&heap->node, parent, p);
1894 rb_insert_color(&heap->node, &dev->heaps);
1895 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1896 &debug_heap_fops);
1897end:
1898 mutex_unlock(&dev->lock);
1899}
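
/*
 * Illustrative sketch (assumption, not from this file): board/platform
 * code typically builds heaps from its ion_platform_data and registers
 * each one here.  ion_heap_create() is assumed to be the heap factory
 * provided by the heap-specific code:
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *		if (IS_ERR_OR_NULL(heap))
 *			continue;
 *		ion_device_add_heap(idev, heap);
 *	}
 */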
1900
Laura Abbott7e446482012-06-13 15:59:39 -07001901int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1902 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001903{
1904 struct rb_node *n;
1905 int ret_val = 0;
1906
1907 /*
1908 * traverse the list of heaps available in this system
1909 * and find the heap that is specified.
1910 */
1911 mutex_lock(&dev->lock);
1912 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1913 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1914 if (heap->type != ION_HEAP_TYPE_CP)
1915 continue;
1916 if (ION_HEAP(heap->id) != heap_id)
1917 continue;
1918 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001919 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001920 else
1921 ret_val = -EINVAL;
1922 break;
1923 }
1924 mutex_unlock(&dev->lock);
1925 return ret_val;
1926}
Olav Haugan0a852512012-01-09 10:20:55 -08001927
Laura Abbott7e446482012-06-13 15:59:39 -07001928int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1929 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001930{
1931 struct rb_node *n;
1932 int ret_val = 0;
1933
1934 /*
1935 * traverse the list of heaps available in this system
1936 * and find the heap that is specified.
1937 */
1938 mutex_lock(&dev->lock);
1939 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1940 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1941 if (heap->type != ION_HEAP_TYPE_CP)
1942 continue;
1943 if (ION_HEAP(heap->id) != heap_id)
1944 continue;
1945 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001946 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001947 else
1948 ret_val = -EINVAL;
1949 break;
1950 }
1951 mutex_unlock(&dev->lock);
1952 return ret_val;
1953}
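
/*
 * Illustrative sketch: a driver that needs a content-protected heap would
 * bracket its secure use case with the two helpers above.  The heap id,
 * version and data argument are platform specific; ION_CP_MM_HEAP_ID and
 * the other names used here are assumptions for the example only:
 *
 *	ret = ion_secure_heap(idev, ION_CP_MM_HEAP_ID, version, data);
 *	if (!ret) {
 *		// ... secure playback / capture ...
 *		ion_unsecure_heap(idev, ION_CP_MM_HEAP_ID, version, data);
 *	}
 */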
Olav Haugan0a852512012-01-09 10:20:55 -08001954
Laura Abbott404f8242011-10-31 14:22:53 -07001955static int ion_debug_leak_show(struct seq_file *s, void *unused)
1956{
1957 struct ion_device *dev = s->private;
1958 struct rb_node *n;
1959 struct rb_node *n2;
1960
1961 /* mark all buffers as 1 */
1962 seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1963 "ref cnt");
1964 mutex_lock(&dev->lock);
1965 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1966 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1967 node);
1968
1969 buf->marked = 1;
1970 }
1971
1972 /* now see which buffers we can access */
1973 for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
1974 struct ion_client *client = rb_entry(n, struct ion_client,
1975 node);
1976
1977 mutex_lock(&client->lock);
1978 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1979 struct ion_handle *handle = rb_entry(n2,
1980 struct ion_handle, node);
1981
1982 handle->buffer->marked = 0;
1983
1984 }
1985 mutex_unlock(&client->lock);
1986
1987 }
1988
1989 for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
1990 struct ion_client *client = rb_entry(n, struct ion_client,
1991 node);
1992
1993 mutex_lock(&client->lock);
1994 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1995 struct ion_handle *handle = rb_entry(n2,
1996 struct ion_handle, node);
1997
1998 handle->buffer->marked = 0;
1999
2000 }
2001 mutex_unlock(&client->lock);
2002
2003 }
2004 /* And anyone still marked as a 1 means a leaked handle somewhere */
2005 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
2006 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
2007 node);
2008
2009 if (buf->marked == 1)
2010 seq_printf(s, "%16x %16s %16x %16d\n",
2011 (int)buf, buf->heap->name, buf->size,
2012 atomic_read(&buf->ref.refcount));
2013 }
2014 mutex_unlock(&dev->lock);
2015 return 0;
2016}
2017
2018static int ion_debug_leak_open(struct inode *inode, struct file *file)
2019{
2020 return single_open(file, ion_debug_leak_show, inode->i_private);
2021}
2022
2023static const struct file_operations debug_leak_fops = {
2024 .open = ion_debug_leak_open,
2025 .read = seq_read,
2026 .llseek = seq_lseek,
2027 .release = single_release,
2028};
2029
2030
2031
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07002032struct ion_device *ion_device_create(long (*custom_ioctl)
2033 (struct ion_client *client,
2034 unsigned int cmd,
2035 unsigned long arg))
2036{
2037 struct ion_device *idev;
2038 int ret;
2039
2040 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
2041 if (!idev)
2042 return ERR_PTR(-ENOMEM);
2043
2044 idev->dev.minor = MISC_DYNAMIC_MINOR;
2045 idev->dev.name = "ion";
2046 idev->dev.fops = &ion_fops;
2047 idev->dev.parent = NULL;
2048 ret = misc_register(&idev->dev);
2049 if (ret) {
2050 pr_err("ion: failed to register misc device.\n");
2051 return ERR_PTR(ret);
2052 }
2053
2054 idev->debug_root = debugfs_create_dir("ion", NULL);
2055 if (IS_ERR_OR_NULL(idev->debug_root))
2056 pr_err("ion: failed to create debug files.\n");
2057
2058 idev->custom_ioctl = custom_ioctl;
2059 idev->buffers = RB_ROOT;
2060 mutex_init(&idev->lock);
2061 idev->heaps = RB_ROOT;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07002062 idev->user_clients = RB_ROOT;
2063 idev->kernel_clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07002064 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
2065 &debug_leak_fops);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07002066 return idev;
2067}
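
/*
 * Illustrative sketch (assumption): a platform driver probe would create
 * the device once, optionally passing a custom ioctl handler for
 * ION_IOC_CUSTOM, and then register its heaps:
 *
 *	static long my_custom_ioctl(struct ion_client *client,
 *				    unsigned int cmd, unsigned long arg)
 *	{
 *		return -ENOTTY;	// nothing platform specific handled here
 *	}
 *
 *	idev = ion_device_create(my_custom_ioctl);
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 *	// then ion_device_add_heap(idev, heap) for each heap
 */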
2068
2069void ion_device_destroy(struct ion_device *dev)
2070{
2071 misc_deregister(&dev->dev);
2072 /* XXX need to free the heaps and clients ? */
2073 kfree(dev);
2074}