/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	arch specific ioctl function if applicable
 * @clients:		an rb tree of all the clients in the system
 * @debug_root:		debugfs root directory for the ion device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped to an iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

static void ion_iommu_release(struct kref *kref);

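/*
 * A buffer's cache flags are fixed by whoever maps it first: while any
 * kernel, user, dma or iommu mapping exists, later maps must request the
 * same flags.  Returns 1 on a mismatch, 0 otherwise.
 */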
static int ion_validate_buffer_flags(struct ion_buffer *buffer,
					unsigned long flags)
{
	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
		buffer->iommu_map_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
				" cannot map with flags %lx\n", __func__,
				buffer->flags, flags);
			return 1;
		}

	} else {
		buffer->flags = flags;
	}
	return 0;
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d"
				" and partition %d\n", __func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);

}

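/*
 * Mappings are keyed by a 64-bit value with the domain number in the
 * upper 32 bits and the partition number in the lower 32 bits, so each
 * (domain, partition) pair maps a buffer at most once.
 */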
static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
						unsigned int domain_no,
						unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;
	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & flags))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer)) {
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

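/*
 * Kernel mappings are reference counted at both the buffer and handle
 * level: the first kmap of a buffer calls the heap's map_kernel() and
 * caches the vaddr; later kmaps only bump the counts.
 */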
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

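/*
 * Create and record a new iommu mapping for the buffer in the given
 * domain and partition.  Called from ion_map_iommu() with buffer->lock
 * held.
 */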
static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		} else {
			ret = PTR_ERR(iommu_map);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
				" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
			domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);

}
EXPORT_SYMBOL(ion_unmap_iommu);

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
			unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		mutex_unlock(&client->lock);
		return ERR_PTR(-EEXIST);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_do_cache_op);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
				handle->buffer->heap->name,
				handle->buffer->size,
				atomic_read(&handle->ref.refcount),
				handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
					imap->domain_info[DI_DOMAIN_NUM],
					imap->domain_info[DI_PARTITION_NUM],
					imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(name, 0664,
					dev->debug_root, client,
					&debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

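/*
 * The dma_buf map/unmap callbacks simply hand back the sg_table that was
 * built when the buffer was created; no per-attachment mapping is done.
 */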
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt++;
	mutex_unlock(&buffer->lock);
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
			"to userspace\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);

	if (ret) {
		mutex_unlock(&buffer->lock);
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
	} else {
		buffer->umap_cnt++;
		mutex_unlock(&buffer->lock);

		vma->vm_ops = &ion_vm_ops;
		/*
		 * move the buffer into the vm_private_data so we can access it
		 * from vma_open/close
		 */
		vma->vm_private_data = buffer;
	}
	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
				void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

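/*
 * When a handle is shared as an fd, the open flags on that fd select the
 * cache policy: O_DSYNC requests an uncached mapping, otherwise the
 * buffer is treated as cached.  The requested flags must agree with any
 * existing mappings of the buffer.
 */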
static int ion_share_set_flags(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags)
{
	struct ion_buffer *buffer;
	bool valid_handle;
	unsigned long ion_flags = ION_SET_CACHE(CACHED);
	if (flags & O_DSYNC)
		ion_flags = ION_SET_CACHE(UNCACHED);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	if (ion_validate_buffer_flags(buffer, ion_flags)) {
		mutex_unlock(&buffer->lock);
		return -EEXIST;
	}
	mutex_unlock(&buffer->lock);
	return 0;
}

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
		if (ret)
			return ret;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_CACHES, arg);
	case ION_IOC_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_INV_CACHES, arg);
	case ION_IOC_CLEAN_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_INV_CACHES, arg);
	case ION_IOC_GET_FLAGS:
		return client->dev->custom_ioctl(client,
						ION_IOC_GET_FLAGS, arg);
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

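/*
 * Opening /dev/ion creates a client named after the caller's pid with a
 * heap mask of -1, i.e. allowed to allocate from every heap.
 */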
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, -1, debug_name);
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_ids id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

Olav Haugan0671b9a2012-05-25 11:58:56 -07001486/**
1487 * Searches through a client's handles to find whether the buffer is owned
1488 * by this client. Used for debug output.
1489 * @param client pointer to candidate owner of buffer
1490 * @param buf pointer to buffer that we are trying to find the owner of
1491 * @return 1 if found, 0 otherwise
1492 */
1493static int ion_debug_find_buffer_owner(const struct ion_client *client,
1494 const struct ion_buffer *buf)
1495{
1496 struct rb_node *n;
1497
1498 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1499 const struct ion_handle *handle = rb_entry(n,
1500 const struct ion_handle,
1501 node);
1502 if (handle->buffer == buf)
1503 return 1;
1504 }
1505 return 0;
1506}
1507
1508/**
1509 * Adds a mem_map_data entry to the mem_map tree.
1510 * Used for debug output.
1511 * @param mem_map The mem_map tree
1512 * @param data The new data to add to the tree
1513 */
1514static void ion_debug_mem_map_add(struct rb_root *mem_map,
1515 struct mem_map_data *data)
1516{
1517 struct rb_node **p = &mem_map->rb_node;
1518 struct rb_node *parent = NULL;
1519 struct mem_map_data *entry;
1520
1521 while (*p) {
1522 parent = *p;
1523 entry = rb_entry(parent, struct mem_map_data, node);
1524
1525 if (data->addr < entry->addr) {
1526 p = &(*p)->rb_left;
1527 } else if (data->addr > entry->addr) {
1528 p = &(*p)->rb_right;
1529 } else {
1530			pr_err("%s: mem_map_data already found.\n", __func__);
1531 BUG();
1532 }
1533 }
1534 rb_link_node(&data->node, parent, p);
1535 rb_insert_color(&data->node, mem_map);
1536}
1537
1538/**
1539 * Search for an owner of a buffer by iterating over all ION clients.
1540 * @param dev ion device containing pointers to all the clients.
1541 * @param buffer pointer to buffer we are trying to find the owner of.
1542 * @return name of owner, or NULL if no client owns the buffer.
1543 */
1544const char *ion_debug_locate_owner(const struct ion_device *dev,
1545 const struct ion_buffer *buffer)
1546{
1547 struct rb_node *j;
1548 const char *client_name = NULL;
1549
Laura Abbottb14ed962012-01-30 14:18:08 -08001550 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001551 j = rb_next(j)) {
1552 struct ion_client *client = rb_entry(j, struct ion_client,
1553 node);
1554 if (ion_debug_find_buffer_owner(client, buffer))
1555 client_name = client->name;
1556 }
1557 return client_name;
1558}
1559
1560/**
1561 * Create a mem_map of the heap.
1562 * @param s seq_file to log error message to.
1563 * @param heap The heap to create mem_map for.
1564 * @param mem_map The mem map to be created.
1565 */
1566void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1567 struct rb_root *mem_map)
1568{
1569 struct ion_device *dev = heap->dev;
1570 struct rb_node *n;
1571
1572 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1573 struct ion_buffer *buffer =
1574 rb_entry(n, struct ion_buffer, node);
1575 if (buffer->heap->id == heap->id) {
1576 struct mem_map_data *data =
1577 kzalloc(sizeof(*data), GFP_KERNEL);
1578 if (!data) {
1579				seq_printf(s, "ERROR: out of memory. Part of memory map will not be logged\n");
1581 break;
1582 }
1583 data->addr = buffer->priv_phys;
1584			data->addr_end = buffer->priv_phys + buffer->size - 1;
1585 data->size = buffer->size;
1586 data->client_name = ion_debug_locate_owner(dev, buffer);
1587 ion_debug_mem_map_add(mem_map, data);
1588 }
1589 }
1590}
1591
1592/**
1593 * Free the memory allocated by ion_debug_mem_map_create
1594 * @param mem_map The mem map to free.
1595 */
1596static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1597{
1598 if (mem_map) {
1599 struct rb_node *n;
1600		while ((n = rb_first(mem_map)) != NULL) {
1601 struct mem_map_data *data =
1602 rb_entry(n, struct mem_map_data, node);
1603 rb_erase(&data->node, mem_map);
1604 kfree(data);
1605 }
1606 }
1607}
1608
1609/**
1610 * Print heap debug information.
1611 * @param s seq_file to log message to.
1612 * @param heap pointer to heap that we will print debug information for.
1613 */
1614static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1615{
1616 if (heap->ops->print_debug) {
1617 struct rb_root mem_map = RB_ROOT;
1618 ion_debug_mem_map_create(s, heap, &mem_map);
1619 heap->ops->print_debug(heap, s, &mem_map);
1620 ion_debug_mem_map_destroy(&mem_map);
1621 }
1622}
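
/*
 * Hypothetical helper, not part of the original file: roughly what a
 * heap's print_debug hook could do with the mem_map assembled above.
 * Field names come from the mem_map_data entries filled in by
 * ion_debug_mem_map_create(); the output format is only an example.
 */
static void __maybe_unused ion_example_dump_mem_map(struct seq_file *s,
						    struct rb_root *mem_map)
{
	struct rb_node *n;

	for (n = rb_first(mem_map); n; n = rb_next(n)) {
		struct mem_map_data *data =
				rb_entry(n, struct mem_map_data, node);

		/* one line per buffer: owner, physical range, size */
		seq_printf(s, "%16s %#10lx-%#10lx %10lu\n",
			   data->client_name ? data->client_name : "(null)",
			   (unsigned long)data->addr,
			   (unsigned long)data->addr_end,
			   (unsigned long)data->size);
	}
}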
1623
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001624static int ion_debug_heap_show(struct seq_file *s, void *unused)
1625{
1626 struct ion_heap *heap = s->private;
1627 struct ion_device *dev = heap->dev;
1628 struct rb_node *n;
1629
Olav Haugane4900b52012-05-25 11:58:03 -07001630 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001631	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001632
Laura Abbottb14ed962012-01-30 14:18:08 -08001633 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001634 struct ion_client *client = rb_entry(n, struct ion_client,
1635 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001636 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001637 if (!size)
1638 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001639 if (client->task) {
1640 char task_comm[TASK_COMM_LEN];
1641
1642 get_task_comm(task_comm, client->task);
1643			seq_printf(s, "%16s %16u %16u\n", task_comm,
1644 client->pid, size);
1645 } else {
1646			seq_printf(s, "%16s %16u %16u\n", client->name,
1647 client->pid, size);
1648 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001649 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001650 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001651 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001652 return 0;
1653}
1654
1655static int ion_debug_heap_open(struct inode *inode, struct file *file)
1656{
1657 return single_open(file, ion_debug_heap_show, inode->i_private);
1658}
1659
1660static const struct file_operations debug_heap_fops = {
1661 .open = ion_debug_heap_open,
1662 .read = seq_read,
1663 .llseek = seq_lseek,
1664 .release = single_release,
1665};
1666
1667void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1668{
1669 struct rb_node **p = &dev->heaps.rb_node;
1670 struct rb_node *parent = NULL;
1671 struct ion_heap *entry;
1672
Laura Abbottb14ed962012-01-30 14:18:08 -08001673 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1674 !heap->ops->unmap_dma)
1675		pr_err("%s: cannot add heap with invalid ops struct.\n",
1676 __func__);
1677
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001678 heap->dev = dev;
1679 mutex_lock(&dev->lock);
1680 while (*p) {
1681 parent = *p;
1682 entry = rb_entry(parent, struct ion_heap, node);
1683
1684 if (heap->id < entry->id) {
1685 p = &(*p)->rb_left;
1686		} else if (heap->id > entry->id) {
1687 p = &(*p)->rb_right;
1688 } else {
1689			pr_err("%s: cannot insert multiple heaps with id %d\n",
1690				__func__, heap->id);
1691 goto end;
1692 }
1693 }
1694
1695 rb_link_node(&heap->node, parent, p);
1696 rb_insert_color(&heap->node, &dev->heaps);
1697 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1698 &debug_heap_fops);
1699end:
1700 mutex_unlock(&dev->lock);
1701}
1702
Laura Abbott7e446482012-06-13 15:59:39 -07001703int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1704 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001705{
1706 struct rb_node *n;
1707 int ret_val = 0;
1708
1709 /*
1710 * traverse the list of heaps available in this system
1711 * and find the heap that is specified.
1712 */
1713 mutex_lock(&dev->lock);
1714 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1715 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1716 if (heap->type != ION_HEAP_TYPE_CP)
1717 continue;
1718 if (ION_HEAP(heap->id) != heap_id)
1719 continue;
1720 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001721 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001722 else
1723 ret_val = -EINVAL;
1724 break;
1725 }
1726 mutex_unlock(&dev->lock);
1727 return ret_val;
1728}
Olav Hauganbd453a92012-07-05 14:21:34 -07001729EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001730
Laura Abbott7e446482012-06-13 15:59:39 -07001731int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1732 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001733{
1734 struct rb_node *n;
1735 int ret_val = 0;
1736
1737 /*
1738 * traverse the list of heaps available in this system
1739 * and find the heap that is specified.
1740 */
1741 mutex_lock(&dev->lock);
1742 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1743 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1744 if (heap->type != ION_HEAP_TYPE_CP)
1745 continue;
1746 if (ION_HEAP(heap->id) != heap_id)
1747 continue;
1748		if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001749 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001750 else
1751 ret_val = -EINVAL;
1752 break;
1753 }
1754 mutex_unlock(&dev->lock);
1755 return ret_val;
1756}
Olav Hauganbd453a92012-07-05 14:21:34 -07001757EXPORT_SYMBOL(ion_unsecure_heap);
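
/*
 * Hypothetical usage sketch, not part of the original file: a platform
 * driver owning a content-protection heap could bracket protected work
 * with the two calls above.  heap_id, version and data are whatever the
 * platform's CP heap implementation expects; use_heap and priv stand in
 * for the caller's protected operation.
 */
static int __maybe_unused ion_example_run_secured(struct ion_device *dev,
						  int heap_id, int version,
						  void *data,
						  int (*use_heap)(void *priv),
						  void *priv)
{
	int ret = ion_secure_heap(dev, heap_id, version, data);

	if (ret)
		return ret;

	ret = use_heap(priv);		/* protected work happens here */

	ion_unsecure_heap(dev, heap_id, version, data);
	return ret;
}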
Olav Haugan0a852512012-01-09 10:20:55 -08001758
Laura Abbott404f8242011-10-31 14:22:53 -07001759static int ion_debug_leak_show(struct seq_file *s, void *unused)
1760{
1761 struct ion_device *dev = s->private;
1762 struct rb_node *n;
1763 struct rb_node *n2;
1764
1765 /* mark all buffers as 1 */
1766	seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1767 "ref cnt");
1768 mutex_lock(&dev->lock);
1769 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1770 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1771 node);
1772
1773 buf->marked = 1;
1774 }
1775
1776 /* now see which buffers we can access */
Laura Abbottb14ed962012-01-30 14:18:08 -08001777 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Laura Abbott404f8242011-10-31 14:22:53 -07001778 struct ion_client *client = rb_entry(n, struct ion_client,
1779 node);
1780
1781 mutex_lock(&client->lock);
1782 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1783 struct ion_handle *handle = rb_entry(n2,
1784 struct ion_handle, node);
1785
1786 handle->buffer->marked = 0;
1787
1788 }
1789 mutex_unlock(&client->lock);
1790
1791 }
1792
Laura Abbott404f8242011-10-31 14:22:53 -07001793	/* Any buffer still marked 1 indicates a leaked handle somewhere */
1794 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1795 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1796 node);
1797
1798 if (buf->marked == 1)
1799			seq_printf(s, "%16x %16s %16x %16d\n",
1800 (int)buf, buf->heap->name, buf->size,
1801 atomic_read(&buf->ref.refcount));
1802 }
1803 mutex_unlock(&dev->lock);
1804 return 0;
1805}
1806
1807static int ion_debug_leak_open(struct inode *inode, struct file *file)
1808{
1809 return single_open(file, ion_debug_leak_show, inode->i_private);
1810}
1811
1812static const struct file_operations debug_leak_fops = {
1813 .open = ion_debug_leak_open,
1814 .read = seq_read,
1815 .llseek = seq_lseek,
1816 .release = single_release,
1817};
1818
1819
1820
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001821struct ion_device *ion_device_create(long (*custom_ioctl)
1822 (struct ion_client *client,
1823 unsigned int cmd,
1824 unsigned long arg))
1825{
1826 struct ion_device *idev;
1827 int ret;
1828
1829 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1830 if (!idev)
1831 return ERR_PTR(-ENOMEM);
1832
1833 idev->dev.minor = MISC_DYNAMIC_MINOR;
1834 idev->dev.name = "ion";
1835 idev->dev.fops = &ion_fops;
1836 idev->dev.parent = NULL;
1837 ret = misc_register(&idev->dev);
1838 if (ret) {
1839 pr_err("ion: failed to register misc device.\n");
		kfree(idev);
1840		return ERR_PTR(ret);
1841 }
1842
1843 idev->debug_root = debugfs_create_dir("ion", NULL);
1844 if (IS_ERR_OR_NULL(idev->debug_root))
1845 pr_err("ion: failed to create debug files.\n");
1846
1847 idev->custom_ioctl = custom_ioctl;
1848 idev->buffers = RB_ROOT;
1849 mutex_init(&idev->lock);
1850 idev->heaps = RB_ROOT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001851 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001852 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1853 &debug_leak_fops);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001854 return idev;
1855}
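
/*
 * Hypothetical probe-time sketch, not part of the original file: a board
 * driver (e.g. msm_ion) would typically create the device once and then
 * register each heap it has already constructed.  The heaps array and
 * custom_ioctl handler are assumed to be supplied by the caller.
 */
static struct ion_device * __maybe_unused
ion_example_setup(struct ion_heap **heaps, int nr_heaps,
		  long (*custom_ioctl)(struct ion_client *client,
				       unsigned int cmd, unsigned long arg))
{
	struct ion_device *idev = ion_device_create(custom_ioctl);
	int i;

	if (IS_ERR_OR_NULL(idev))
		return idev;

	for (i = 0; i < nr_heaps; i++)
		ion_device_add_heap(idev, heaps[i]);

	return idev;
}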
1856
1857void ion_device_destroy(struct ion_device *dev)
1858{
1859 misc_deregister(&dev->dev);
1860 /* XXX need to free the heaps and clients ? */
1861 kfree(dev);
1862}
Laura Abbottb14ed962012-01-30 14:18:08 -08001863
1864void __init ion_reserve(struct ion_platform_data *data)
1865{
1866 int i, ret;
1867
1868 for (i = 0; i < data->nr; i++) {
1869 if (data->heaps[i].size == 0)
1870 continue;
1871 ret = memblock_reserve(data->heaps[i].base,
1872 data->heaps[i].size);
1873 if (ret)
1874 pr_err("memblock reserve of %x@%lx failed\n",
1875 data->heaps[i].size,
1876 data->heaps[i].base);
1877 }
1878}
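
/*
 * Hypothetical early-boot sketch, not part of the original file: a board
 * file would call ion_reserve() from its machine .reserve callback, before
 * the page allocator owns the memory, so that the carveout ranges listed
 * in its ion_platform_data are still available when the heaps are created.
 * example_ion_pdata is an assumed board-specific definition.
 *
 *	static void __init example_board_reserve(void)
 *	{
 *		ion_reserve(&example_ion_pdata);
 *	}
 */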