Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001/*
2 * drivers/gpu/ion/ion.c
3 *
4 * Copyright (C) 2011 Google, Inc.
Olav Haugan0a852512012-01-09 10:20:55 -08005 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07006 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
Steve Mucklef132c6c2012-06-06 18:30:57 -070018#include <linux/module.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070019#include <linux/device.h>
20#include <linux/file.h>
21#include <linux/fs.h>
22#include <linux/anon_inodes.h>
23#include <linux/ion.h>
24#include <linux/list.h>
Laura Abbottb14ed962012-01-30 14:18:08 -080025#include <linux/memblock.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070026#include <linux/miscdevice.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070027#include <linux/mm.h>
28#include <linux/mm_types.h>
29#include <linux/rbtree.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/seq_file.h>
33#include <linux/uaccess.h>
34#include <linux/debugfs.h>
Laura Abbottb14ed962012-01-30 14:18:08 -080035#include <linux/dma-buf.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070036
Laura Abbott8c017362011-09-22 20:59:12 -070037#include <mach/iommu_domains.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070038#include "ion_priv.h"
39#define DEBUG
40
41/**
42 * struct ion_device - the metadata of the ion device node
43 * @dev: the actual misc device
44 * @buffers: an rb tree of all the existing buffers
45 * @lock: lock protecting the buffers & heaps trees
46 * @heaps: an rb tree of all the heaps in the system
47 * @clients: an rb tree of all the clients, created from userspace or by the kernel
48 */
49struct ion_device {
50 struct miscdevice dev;
51 struct rb_root buffers;
52 struct mutex lock;
53 struct rb_root heaps;
54 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
55 unsigned long arg);
Laura Abbottb14ed962012-01-30 14:18:08 -080056 struct rb_root clients;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070057 struct dentry *debug_root;
58};
59
60/**
61 * struct ion_client - a process/hw block local address space
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070062 * @node: node in the tree of all clients
63 * @dev: backpointer to ion device
64 * @handles: an rb tree of all the handles in this client
65 * @lock: lock protecting the tree of handles
66 * @heap_mask: mask of heap types this client may allocate from
67 * @name: used for debugging
68 * @task: used for debugging
69 *
70 * A client represents a list of buffers this client may access.
71 * The mutex stored here is used to protect both the tree of handles
72 * and the handles themselves, and should be held while modifying either.
73 */
74struct ion_client {
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070075 struct rb_node node;
76 struct ion_device *dev;
77 struct rb_root handles;
78 struct mutex lock;
79 unsigned int heap_mask;
Olav Haugan63e5f3b2012-01-11 16:42:37 -080080 char *name;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070081 struct task_struct *task;
82 pid_t pid;
83 struct dentry *debug_root;
84};
85
86/**
87 * struct ion_handle - a client local reference to a buffer
88 * @ref: reference count
89 * @client: back pointer to the client the buffer resides in
90 * @buffer: pointer to the buffer
91 * @node: node in the client's handle rbtree
92 * @kmap_cnt: count of times this client has mapped to kernel
93 * @iommu_map_cnt: count of times this client has mapped for iommu
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070094 *
95 * Modifications to node and the map counts should be protected by the
96 * lock in the client. Other fields are never changed after initialization.
97 */
98struct ion_handle {
99 struct kref ref;
100 struct ion_client *client;
101 struct ion_buffer *buffer;
102 struct rb_node node;
103 unsigned int kmap_cnt;
Laura Abbott8c017362011-09-22 20:59:12 -0700104 unsigned int iommu_map_cnt;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700105};
106
Olav Hauganb3676592012-03-02 15:02:25 -0800107static void ion_iommu_release(struct kref *kref);
108
Laura Abbott8c017362011-09-22 20:59:12 -0700109static int ion_validate_buffer_flags(struct ion_buffer *buffer,
110 unsigned long flags)
111{
112 if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
113 buffer->iommu_map_cnt) {
114 if (buffer->flags != flags) {
115 pr_err("%s: buffer was already mapped with flags %lx,"
116 " cannot map with flags %lx\n", __func__,
117 buffer->flags, flags);
118 return 1;
119 }
120
121 } else {
122 buffer->flags = flags;
123 }
124 return 0;
125}
126
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700127/* this function should only be called while dev->lock is held */
128static void ion_buffer_add(struct ion_device *dev,
129 struct ion_buffer *buffer)
130{
131 struct rb_node **p = &dev->buffers.rb_node;
132 struct rb_node *parent = NULL;
133 struct ion_buffer *entry;
134
135 while (*p) {
136 parent = *p;
137 entry = rb_entry(parent, struct ion_buffer, node);
138
139 if (buffer < entry) {
140 p = &(*p)->rb_left;
141 } else if (buffer > entry) {
142 p = &(*p)->rb_right;
143 } else {
144 pr_err("%s: buffer already found.", __func__);
145 BUG();
146 }
147 }
148
149 rb_link_node(&buffer->node, parent, p);
150 rb_insert_color(&buffer->node, &dev->buffers);
151}
152
Olav Haugan0fa9b602012-01-25 11:50:38 -0800153static void ion_iommu_add(struct ion_buffer *buffer,
Laura Abbott8c017362011-09-22 20:59:12 -0700154 struct ion_iommu_map *iommu)
155{
156 struct rb_node **p = &buffer->iommu_maps.rb_node;
157 struct rb_node *parent = NULL;
158 struct ion_iommu_map *entry;
159
160 while (*p) {
161 parent = *p;
162 entry = rb_entry(parent, struct ion_iommu_map, node);
163
164 if (iommu->key < entry->key) {
165 p = &(*p)->rb_left;
166 } else if (iommu->key > entry->key) {
167 p = &(*p)->rb_right;
168 } else {
169 pr_err("%s: buffer %p already has mapping for domain %d"
170 " and partition %d\n", __func__,
171 buffer,
172 iommu_map_domain(iommu),
173 iommu_map_partition(iommu));
174 BUG();
175 }
176 }
177
178 rb_link_node(&iommu->node, parent, p);
179 rb_insert_color(&iommu->node, &buffer->iommu_maps);
180
181}
182
183static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
184 unsigned int domain_no,
185 unsigned int partition_no)
186{
187 struct rb_node **p = &buffer->iommu_maps.rb_node;
188 struct rb_node *parent = NULL;
189 struct ion_iommu_map *entry;
190 uint64_t key = domain_no;
191 key = key << 32 | partition_no;
192
193 while (*p) {
194 parent = *p;
195 entry = rb_entry(parent, struct ion_iommu_map, node);
196
197 if (key < entry->key)
198 p = &(*p)->rb_left;
199 else if (key > entry->key)
200 p = &(*p)->rb_right;
201 else
202 return entry;
203 }
204
205 return NULL;
206}
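/*
 * Note on the lookup key above: a (domain, partition) pair is packed into a
 * single 64-bit value so the iommu_maps rb tree can be ordered with one
 * comparison.  A minimal sketch of the packing, purely for illustration:
 *
 *	uint64_t key = ((uint64_t)domain_no << 32) | partition_no;
 *	unsigned int domain    = key >> 32;		(recovers domain_no)
 *	unsigned int partition = key & 0xffffffff;	(recovers partition_no)
 */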
207
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700208/* this function should only be called while dev->lock is held */
209static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
210 struct ion_device *dev,
211 unsigned long len,
212 unsigned long align,
213 unsigned long flags)
214{
215 struct ion_buffer *buffer;
Laura Abbottb14ed962012-01-30 14:18:08 -0800216 struct sg_table *table;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700217 int ret;
218
219 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
220 if (!buffer)
221 return ERR_PTR(-ENOMEM);
222
223 buffer->heap = heap;
224 kref_init(&buffer->ref);
225
226 ret = heap->ops->allocate(heap, buffer, len, align, flags);
227 if (ret) {
228 kfree(buffer);
229 return ERR_PTR(ret);
230 }
Laura Abbottb14ed962012-01-30 14:18:08 -0800231
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700232 buffer->dev = dev;
233 buffer->size = len;
Laura Abbottb14ed962012-01-30 14:18:08 -0800234
235 table = buffer->heap->ops->map_dma(buffer->heap, buffer);
236 if (IS_ERR_OR_NULL(table)) {
237 heap->ops->free(buffer);
238 kfree(buffer);
239 return ERR_PTR(PTR_ERR(table));
240 }
241 buffer->sg_table = table;
242
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700243 mutex_init(&buffer->lock);
244 ion_buffer_add(dev, buffer);
245 return buffer;
246}
247
Olav Hauganb3676592012-03-02 15:02:25 -0800248/**
249 * Check for delayed IOMMU unmapping. Also unmap any outstanding
250 * mappings which would otherwise have been leaked.
251 */
252static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
253{
254 struct ion_iommu_map *iommu_map;
255 struct rb_node *node;
256 const struct rb_root *rb = &(buffer->iommu_maps);
257 unsigned long ref_count;
258 unsigned int delayed_unmap;
259
260 mutex_lock(&buffer->lock);
261
262 while ((node = rb_first(rb)) != 0) {
263 iommu_map = rb_entry(node, struct ion_iommu_map, node);
264 ref_count = atomic_read(&iommu_map->ref.refcount);
265 delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;
266
267 if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
268 pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
269 __func__, iommu_map->domain_info[DI_DOMAIN_NUM],
270 iommu_map->domain_info[DI_PARTITION_NUM]);
271 }
272 /* set ref count to 1 to force release */
273 kref_init(&iommu_map->ref);
274 kref_put(&iommu_map->ref, ion_iommu_release);
275 }
276
277 mutex_unlock(&buffer->lock);
278}
279
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700280static void ion_buffer_destroy(struct kref *kref)
281{
282 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
283 struct ion_device *dev = buffer->dev;
284
Laura Abbottb14ed962012-01-30 14:18:08 -0800285 if (WARN_ON(buffer->kmap_cnt > 0))
286 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
287
288 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
289
Olav Hauganb3676592012-03-02 15:02:25 -0800290 ion_iommu_delayed_unmap(buffer);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700291 buffer->heap->ops->free(buffer);
292 mutex_lock(&dev->lock);
293 rb_erase(&buffer->node, &dev->buffers);
294 mutex_unlock(&dev->lock);
295 kfree(buffer);
296}
297
298static void ion_buffer_get(struct ion_buffer *buffer)
299{
300 kref_get(&buffer->ref);
301}
302
303static int ion_buffer_put(struct ion_buffer *buffer)
304{
305 return kref_put(&buffer->ref, ion_buffer_destroy);
306}
307
308static struct ion_handle *ion_handle_create(struct ion_client *client,
309 struct ion_buffer *buffer)
310{
311 struct ion_handle *handle;
312
313 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
314 if (!handle)
315 return ERR_PTR(-ENOMEM);
316 kref_init(&handle->ref);
317 rb_init_node(&handle->node);
318 handle->client = client;
319 ion_buffer_get(buffer);
320 handle->buffer = buffer;
321
322 return handle;
323}
324
Laura Abbottb14ed962012-01-30 14:18:08 -0800325static void ion_handle_kmap_put(struct ion_handle *);
326
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700327static void ion_handle_destroy(struct kref *kref)
328{
329 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
Laura Abbottb14ed962012-01-30 14:18:08 -0800330 struct ion_client *client = handle->client;
331 struct ion_buffer *buffer = handle->buffer;
332
Laura Abbottb14ed962012-01-30 14:18:08 -0800333 mutex_lock(&buffer->lock);
334 while (handle->kmap_cnt)
335 ion_handle_kmap_put(handle);
336 mutex_unlock(&buffer->lock);
337
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700338 if (!RB_EMPTY_NODE(&handle->node))
Laura Abbottb14ed962012-01-30 14:18:08 -0800339 rb_erase(&handle->node, &client->handles);
Laura Abbottb14ed962012-01-30 14:18:08 -0800340
341 ion_buffer_put(buffer);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700342 kfree(handle);
343}
344
345struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
346{
347 return handle->buffer;
348}
349
350static void ion_handle_get(struct ion_handle *handle)
351{
352 kref_get(&handle->ref);
353}
354
355static int ion_handle_put(struct ion_handle *handle)
356{
357 return kref_put(&handle->ref, ion_handle_destroy);
358}
359
360static struct ion_handle *ion_handle_lookup(struct ion_client *client,
361 struct ion_buffer *buffer)
362{
363 struct rb_node *n;
364
365 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
366 struct ion_handle *handle = rb_entry(n, struct ion_handle,
367 node);
368 if (handle->buffer == buffer)
369 return handle;
370 }
371 return NULL;
372}
373
374static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
375{
376 struct rb_node *n = client->handles.rb_node;
377
378 while (n) {
379 struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
380 node);
381 if (handle < handle_node)
382 n = n->rb_left;
383 else if (handle > handle_node)
384 n = n->rb_right;
385 else
386 return true;
387 }
388 return false;
389}
390
391static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
392{
393 struct rb_node **p = &client->handles.rb_node;
394 struct rb_node *parent = NULL;
395 struct ion_handle *entry;
396
397 while (*p) {
398 parent = *p;
399 entry = rb_entry(parent, struct ion_handle, node);
400
401 if (handle < entry)
402 p = &(*p)->rb_left;
403 else if (handle > entry)
404 p = &(*p)->rb_right;
405 else
406 WARN(1, "%s: handle already found.", __func__);
407 }
408
409 rb_link_node(&handle->node, parent, p);
410 rb_insert_color(&handle->node, &client->handles);
411}
412
413struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
414 size_t align, unsigned int flags)
415{
416 struct rb_node *n;
417 struct ion_handle *handle;
418 struct ion_device *dev = client->dev;
419 struct ion_buffer *buffer = NULL;
Olav Haugan0a852512012-01-09 10:20:55 -0800420 unsigned long secure_allocation = flags & ION_SECURE;
Olav Haugan35e2f2f2012-01-11 17:31:47 -0800421 const unsigned int MAX_DBG_STR_LEN = 64;
422 char dbg_str[MAX_DBG_STR_LEN];
423 unsigned int dbg_str_idx = 0;
424
425 dbg_str[0] = '\0';
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700426
427 /*
428 * Traverse the list of heaps available in this system in priority
429 * order. If the heap type is supported by the client and matches the
430 * request of the caller, allocate from it. Repeat until the allocation
431 * succeeds or all heaps have been tried.
432 */
Laura Abbottb14ed962012-01-30 14:18:08 -0800433 if (WARN_ON(!len))
434 return ERR_PTR(-EINVAL);
435
436 len = PAGE_ALIGN(len);
437
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700438 mutex_lock(&dev->lock);
439 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
440 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
441 /* if the client doesn't support this heap type */
442 if (!((1 << heap->type) & client->heap_mask))
443 continue;
444 /* if the caller didn't specify this heap id */
445 if (!((1 << heap->id) & flags))
446 continue;
Olav Haugan0a852512012-01-09 10:20:55 -0800447 /* Do not allow a non-secure heap if a secure allocation is requested */
448 if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
449 continue;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700450 buffer = ion_buffer_create(heap, dev, len, align, flags);
451 if (!IS_ERR_OR_NULL(buffer))
452 break;
Olav Haugan35e2f2f2012-01-11 17:31:47 -0800453 if (dbg_str_idx < MAX_DBG_STR_LEN) {
454 unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
455 int ret_value = snprintf(&dbg_str[dbg_str_idx],
456 len_left, "%s ", heap->name);
457 if (ret_value >= len_left) {
458 /* overflow */
459 dbg_str[MAX_DBG_STR_LEN-1] = '\0';
460 dbg_str_idx = MAX_DBG_STR_LEN;
461 } else if (ret_value >= 0) {
462 dbg_str_idx += ret_value;
463 } else {
464 /* error */
465 dbg_str[MAX_DBG_STR_LEN-1] = '\0';
466 }
467 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700468 }
469 mutex_unlock(&dev->lock);
470
Laura Abbottb14ed962012-01-30 14:18:08 -0800471 if (buffer == NULL)
472 return ERR_PTR(-ENODEV);
473
474 if (IS_ERR(buffer)) {
Olav Haugan35e2f2f2012-01-11 17:31:47 -0800475 pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
476 "0x%x) from heap(s) %sfor client %s with heap "
477 "mask 0x%x\n",
478 len, align, dbg_str, client->name, client->heap_mask);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700479 return ERR_PTR(PTR_ERR(buffer));
Olav Haugan35e2f2f2012-01-11 17:31:47 -0800480 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700481
482 handle = ion_handle_create(client, buffer);
483
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700484 /*
485 * ion_buffer_create will create a buffer with a ref_cnt of 1,
486 * and ion_handle_create will take a second reference, drop one here
487 */
488 ion_buffer_put(buffer);
489
Laura Abbottb14ed962012-01-30 14:18:08 -0800490 if (!IS_ERR(handle)) {
491 mutex_lock(&client->lock);
492 ion_handle_add(client, handle);
493 mutex_unlock(&client->lock);
494 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700495
Laura Abbottb14ed962012-01-30 14:18:08 -0800496
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700497 return handle;
498}
Olav Hauganbd2b6922012-01-25 09:28:55 -0800499EXPORT_SYMBOL(ion_alloc);
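/*
 * Illustrative usage sketch (not part of the driver): a kernel client
 * allocating, kernel-mapping and freeing a buffer.  "my_ion_dev" and
 * EXAMPLE_HEAP_ID are placeholders, not real symbols; callers use the
 * ion_device and heap ids provided by their platform.  Note that @flags to
 * ion_alloc() carries both a heap id mask (1 << heap->id) and cache flags,
 * while the heap_mask given to ion_client_create() selects heap types
 * (1 << heap->type).
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(my_ion_dev,
 *				   1 << ION_HEAP_TYPE_CARVEOUT, "example");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *
 *	handle = ion_alloc(client, SZ_4K, SZ_4K,
 *			   (1 << EXAMPLE_HEAP_ID) | ION_SET_CACHE(CACHED));
 *	if (IS_ERR_OR_NULL(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *
 *	vaddr = ion_map_kernel(client, handle, ION_SET_CACHE(CACHED));
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		memset(vaddr, 0, SZ_4K);
 *		ion_unmap_kernel(client, handle);
 *	}
 *
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */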
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700500
501void ion_free(struct ion_client *client, struct ion_handle *handle)
502{
503 bool valid_handle;
504
505 BUG_ON(client != handle->client);
506
507 mutex_lock(&client->lock);
508 valid_handle = ion_handle_validate(client, handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700509 if (!valid_handle) {
Laura Abbottec149ff2012-01-26 13:33:11 -0800510 mutex_unlock(&client->lock);
Olav Haugan6ede5672012-04-19 10:20:22 -0700511 WARN(1, "%s: invalid handle passed to free.\n", __func__);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700512 return;
513 }
Laura Abbottb14ed962012-01-30 14:18:08 -0800514 ion_handle_put(handle);
Rebecca Schultz Zavinaad11cb2012-08-20 15:41:11 -0700515 mutex_unlock(&client->lock);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700516}
Olav Hauganbd2b6922012-01-25 09:28:55 -0800517EXPORT_SYMBOL(ion_free);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700518
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700519int ion_phys(struct ion_client *client, struct ion_handle *handle,
520 ion_phys_addr_t *addr, size_t *len)
521{
522 struct ion_buffer *buffer;
523 int ret;
524
525 mutex_lock(&client->lock);
526 if (!ion_handle_validate(client, handle)) {
527 mutex_unlock(&client->lock);
528 return -EINVAL;
529 }
530
531 buffer = handle->buffer;
532
533 if (!buffer->heap->ops->phys) {
534 pr_err("%s: ion_phys is not implemented by this heap.\n",
535 __func__);
536 mutex_unlock(&client->lock);
537 return -ENODEV;
538 }
539 mutex_unlock(&client->lock);
540 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
541 return ret;
542}
Olav Hauganbd2b6922012-01-25 09:28:55 -0800543EXPORT_SYMBOL(ion_phys);
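/*
 * Illustrative sketch (not part of the driver): querying the physical
 * address of a buffer.  Only physically contiguous heaps (e.g. carveout or
 * CP) implement ->phys; other heaps make this return -ENODEV.
 *
 *	ion_phys_addr_t pa;
 *	size_t size;
 *	int ret;
 *
 *	ret = ion_phys(client, handle, &pa, &size);
 *	if (ret)
 *		return ret;
 *	...hand pa/size to hardware that does not sit behind an IOMMU...
 */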
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700544
Laura Abbottb14ed962012-01-30 14:18:08 -0800545static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700546{
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700547 void *vaddr;
548
Laura Abbottb14ed962012-01-30 14:18:08 -0800549 if (buffer->kmap_cnt) {
550 buffer->kmap_cnt++;
551 return buffer->vaddr;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700552 }
Laura Abbottb14ed962012-01-30 14:18:08 -0800553 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
554 if (IS_ERR_OR_NULL(vaddr))
555 return vaddr;
556 buffer->vaddr = vaddr;
557 buffer->kmap_cnt++;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700558 return vaddr;
559}
Laura Abbottb14ed962012-01-30 14:18:08 -0800560
561static void *ion_handle_kmap_get(struct ion_handle *handle)
562{
563 struct ion_buffer *buffer = handle->buffer;
564 void *vaddr;
565
566 if (handle->kmap_cnt) {
567 handle->kmap_cnt++;
568 return buffer->vaddr;
569 }
570 vaddr = ion_buffer_kmap_get(buffer);
571 if (IS_ERR_OR_NULL(vaddr))
572 return vaddr;
573 handle->kmap_cnt++;
574 return vaddr;
575}
576
577static void ion_buffer_kmap_put(struct ion_buffer *buffer)
578{
579 buffer->kmap_cnt--;
580 if (!buffer->kmap_cnt) {
581 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
582 buffer->vaddr = NULL;
583 }
584}
585
586static void ion_handle_kmap_put(struct ion_handle *handle)
587{
588 struct ion_buffer *buffer = handle->buffer;
589
590 handle->kmap_cnt--;
591 if (!handle->kmap_cnt)
592 ion_buffer_kmap_put(buffer);
593}
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700594
Olav Hauganb3676592012-03-02 15:02:25 -0800595static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
Laura Abbott8c017362011-09-22 20:59:12 -0700596 int domain_num, int partition_num, unsigned long align,
597 unsigned long iova_length, unsigned long flags,
598 unsigned long *iova)
599{
600 struct ion_iommu_map *data;
601 int ret;
602
603 data = kmalloc(sizeof(*data), GFP_ATOMIC);
604
605 if (!data)
Olav Hauganb3676592012-03-02 15:02:25 -0800606 return ERR_PTR(-ENOMEM);
Laura Abbott8c017362011-09-22 20:59:12 -0700607
608 data->buffer = buffer;
609 iommu_map_domain(data) = domain_num;
610 iommu_map_partition(data) = partition_num;
611
612 ret = buffer->heap->ops->map_iommu(buffer, data,
613 domain_num,
614 partition_num,
615 align,
616 iova_length,
617 flags);
618
619 if (ret)
620 goto out;
621
622 kref_init(&data->ref);
623 *iova = data->iova_addr;
624
625 ion_iommu_add(buffer, data);
626
Olav Hauganb3676592012-03-02 15:02:25 -0800627 return data;
Laura Abbott8c017362011-09-22 20:59:12 -0700628
629out:
Laura Abbott8c017362011-09-22 20:59:12 -0700630 kfree(data);
Olav Hauganb3676592012-03-02 15:02:25 -0800631 return ERR_PTR(ret);
Laura Abbott8c017362011-09-22 20:59:12 -0700632}
633
634int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
635 int domain_num, int partition_num, unsigned long align,
636 unsigned long iova_length, unsigned long *iova,
637 unsigned long *buffer_size,
Olav Hauganb3676592012-03-02 15:02:25 -0800638 unsigned long flags, unsigned long iommu_flags)
Laura Abbott8c017362011-09-22 20:59:12 -0700639{
640 struct ion_buffer *buffer;
641 struct ion_iommu_map *iommu_map;
642 int ret = 0;
643
Olav Haugan79e9ffa2012-02-24 13:11:10 -0800644 if (ION_IS_CACHED(flags)) {
645 pr_err("%s: Cannot map iommu as cached.\n", __func__);
646 return -EINVAL;
647 }
648
Laura Abbott8c017362011-09-22 20:59:12 -0700649 mutex_lock(&client->lock);
650 if (!ion_handle_validate(client, handle)) {
651 pr_err("%s: invalid handle passed to map_kernel.\n",
652 __func__);
653 mutex_unlock(&client->lock);
654 return -EINVAL;
655 }
656
657 buffer = handle->buffer;
658 mutex_lock(&buffer->lock);
659
660 if (!handle->buffer->heap->ops->map_iommu) {
661 pr_err("%s: map_iommu is not implemented by this heap.\n",
662 __func__);
663 ret = -ENODEV;
664 goto out;
665 }
666
Laura Abbott8c017362011-09-22 20:59:12 -0700667 /*
668 * If clients don't want a custom iova length, just use whatever
669 * the buffer size is
670 */
671 if (!iova_length)
672 iova_length = buffer->size;
673
674 if (buffer->size > iova_length) {
675 pr_debug("%s: iova length %lx is not at least buffer size"
676 " %x\n", __func__, iova_length, buffer->size);
677 ret = -EINVAL;
678 goto out;
679 }
680
681 if (buffer->size & ~PAGE_MASK) {
682 pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
683 buffer->size, PAGE_SIZE);
684 ret = -EINVAL;
685 goto out;
686 }
687
688 if (iova_length & ~PAGE_MASK) {
689 pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
690 iova_length, PAGE_SIZE);
691 ret = -EINVAL;
692 goto out;
693 }
694
695 iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
Olav Hauganb3676592012-03-02 15:02:25 -0800696 if (!iommu_map) {
697 iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
698 align, iova_length, flags, iova);
Laura Abbottb14ed962012-01-30 14:18:08 -0800699 if (!IS_ERR_OR_NULL(iommu_map)) {
Olav Hauganb3676592012-03-02 15:02:25 -0800700 iommu_map->flags = iommu_flags;
701
702 if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
703 kref_get(&iommu_map->ref);
704 }
Laura Abbott8c017362011-09-22 20:59:12 -0700705 } else {
Olav Hauganb3676592012-03-02 15:02:25 -0800706 if (iommu_map->flags != iommu_flags) {
707 pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
708 __func__, handle,
709 iommu_map->flags, iommu_flags);
Olav Hauganb3676592012-03-02 15:02:25 -0800710 ret = -EINVAL;
711 } else if (iommu_map->mapped_size != iova_length) {
Laura Abbott8c017362011-09-22 20:59:12 -0700712 pr_err("%s: handle %p is already mapped with length"
Olav Hauganb3676592012-03-02 15:02:25 -0800713 " %x, trying to map with length %lx\n",
Laura Abbott8c017362011-09-22 20:59:12 -0700714 __func__, handle, iommu_map->mapped_size,
715 iova_length);
Laura Abbott8c017362011-09-22 20:59:12 -0700716 ret = -EINVAL;
717 } else {
718 kref_get(&iommu_map->ref);
719 *iova = iommu_map->iova_addr;
720 }
721 }
Laura Abbottb14ed962012-01-30 14:18:08 -0800722 if (!ret)
723 buffer->iommu_map_cnt++;
Laura Abbott8c017362011-09-22 20:59:12 -0700724 *buffer_size = buffer->size;
725out:
726 mutex_unlock(&buffer->lock);
727 mutex_unlock(&client->lock);
728 return ret;
729}
730EXPORT_SYMBOL(ion_map_iommu);
731
732static void ion_iommu_release(struct kref *kref)
733{
734 struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
735 ref);
736 struct ion_buffer *buffer = map->buffer;
737
738 rb_erase(&map->node, &buffer->iommu_maps);
739 buffer->heap->ops->unmap_iommu(map);
740 kfree(map);
741}
742
743void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
744 int domain_num, int partition_num)
745{
746 struct ion_iommu_map *iommu_map;
747 struct ion_buffer *buffer;
748
749 mutex_lock(&client->lock);
750 buffer = handle->buffer;
751
752 mutex_lock(&buffer->lock);
753
754 iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
755
756 if (!iommu_map) {
757 WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
758 domain_num, partition_num, buffer);
759 goto out;
760 }
761
Laura Abbott8c017362011-09-22 20:59:12 -0700762 kref_put(&iommu_map->ref, ion_iommu_release);
763
Laura Abbottb14ed962012-01-30 14:18:08 -0800764 buffer->iommu_map_cnt--;
Laura Abbott8c017362011-09-22 20:59:12 -0700765out:
766 mutex_unlock(&buffer->lock);
767
768 mutex_unlock(&client->lock);
769
770}
771EXPORT_SYMBOL(ion_unmap_iommu);
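/*
 * Illustrative sketch (not part of the driver): mapping a buffer into an
 * IOMMU domain and tearing the mapping down again.  EXAMPLE_DOMAIN and
 * EXAMPLE_PARTITION are placeholders; real values come from the platform's
 * iommu_domains configuration.  ion_map_iommu() rejects requests for cached
 * mappings and requires the buffer size and iova length to be page aligned;
 * passing ION_IOMMU_UNMAP_DELAYED defers the actual unmap until the buffer
 * itself is freed.
 *
 *	unsigned long iova, buffer_size;
 *	int ret;
 *
 *	ret = ion_map_iommu(client, handle, EXAMPLE_DOMAIN, EXAMPLE_PARTITION,
 *			    SZ_4K, 0, &iova, &buffer_size,
 *			    0, ION_IOMMU_UNMAP_DELAYED);
 *	if (ret)
 *		return ret;
 *
 *	...program the device with iova; it may access
 *	   [iova, iova + buffer_size)...
 *
 *	ion_unmap_iommu(client, handle, EXAMPLE_DOMAIN, EXAMPLE_PARTITION);
 */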
772
Laura Abbottb14ed962012-01-30 14:18:08 -0800773void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
774 unsigned long flags)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700775{
776 struct ion_buffer *buffer;
Laura Abbottb14ed962012-01-30 14:18:08 -0800777 void *vaddr;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700778
779 mutex_lock(&client->lock);
780 if (!ion_handle_validate(client, handle)) {
Laura Abbottb14ed962012-01-30 14:18:08 -0800781 pr_err("%s: invalid handle passed to map_kernel.\n",
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700782 __func__);
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -0700783 mutex_unlock(&client->lock);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700784 return ERR_PTR(-EINVAL);
785 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700786
Laura Abbottb14ed962012-01-30 14:18:08 -0800787 buffer = handle->buffer;
788
789 if (!handle->buffer->heap->ops->map_kernel) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700790 pr_err("%s: map_kernel is not implemented by this heap.\n",
791 __func__);
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -0700792 mutex_unlock(&client->lock);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700793 return ERR_PTR(-ENODEV);
794 }
Laura Abbott894fd582011-08-19 13:33:56 -0700795
Laura Abbott8c017362011-09-22 20:59:12 -0700796 if (ion_validate_buffer_flags(buffer, flags)) {
Laura Abbottb14ed962012-01-30 14:18:08 -0800797 mutex_unlock(&client->lock);
798 return ERR_PTR(-EEXIST);
Laura Abbott894fd582011-08-19 13:33:56 -0700799 }
800
Laura Abbottb14ed962012-01-30 14:18:08 -0800801 mutex_lock(&buffer->lock);
802 vaddr = ion_handle_kmap_get(handle);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700803 mutex_unlock(&buffer->lock);
804 mutex_unlock(&client->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -0800805 return vaddr;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700806}
Olav Hauganbd453a92012-07-05 14:21:34 -0700807EXPORT_SYMBOL(ion_map_kernel);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700808
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700809void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
810{
811 struct ion_buffer *buffer;
812
813 mutex_lock(&client->lock);
814 buffer = handle->buffer;
815 mutex_lock(&buffer->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -0800816 ion_handle_kmap_put(handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700817 mutex_unlock(&buffer->lock);
818 mutex_unlock(&client->lock);
819}
Olav Hauganbd453a92012-07-05 14:21:34 -0700820EXPORT_SYMBOL(ion_unmap_kernel);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700821
Laura Abbottabcb6f72011-10-04 16:26:49 -0700822static int check_vaddr_bounds(unsigned long start, unsigned long end)
823{
824 struct mm_struct *mm = current->active_mm;
825 struct vm_area_struct *vma;
826 int ret = 1;
827
828 if (end < start)
829 goto out;
830
831 down_read(&mm->mmap_sem);
832 vma = find_vma(mm, start);
833 if (vma && vma->vm_start < end) {
834 if (start < vma->vm_start)
835 goto out_up;
836 if (end > vma->vm_end)
837 goto out_up;
838 ret = 0;
839 }
840
841out_up:
842 up_read(&mm->mmap_sem);
843out:
844 return ret;
845}
846
Olav Haugan41f85792012-02-08 15:28:05 -0800847int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
Laura Abbottabcb6f72011-10-04 16:26:49 -0700848 void *uaddr, unsigned long offset, unsigned long len,
849 unsigned int cmd)
850{
851 struct ion_buffer *buffer;
Laura Abbottabcb6f72011-10-04 16:26:49 -0700852 int ret = -EINVAL;
853
854 mutex_lock(&client->lock);
855 if (!ion_handle_validate(client, handle)) {
856 pr_err("%s: invalid handle passed to do_cache_op.\n",
857 __func__);
858 mutex_unlock(&client->lock);
859 return -EINVAL;
860 }
861 buffer = handle->buffer;
862 mutex_lock(&buffer->lock);
863
Laura Abbottcbaa6682011-10-19 12:14:14 -0700864 if (!ION_IS_CACHED(buffer->flags)) {
Laura Abbottabcb6f72011-10-04 16:26:49 -0700865 ret = 0;
866 goto out;
867 }
868
869 if (!handle->buffer->heap->ops->cache_op) {
870 pr_err("%s: cache_op is not implemented by this heap.\n",
871 __func__);
872 ret = -ENODEV;
873 goto out;
874 }
875
Laura Abbottabcb6f72011-10-04 16:26:49 -0700876
877 ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
878 offset, len, cmd);
879
880out:
881 mutex_unlock(&buffer->lock);
882 mutex_unlock(&client->lock);
883 return ret;
884
885}
Olav Hauganbd453a92012-07-05 14:21:34 -0700886EXPORT_SYMBOL(ion_do_cache_op);
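/*
 * Illustrative sketch (not part of the driver): cache maintenance on a
 * cached buffer after the CPU has written it and before a device reads it.
 * @vaddr and @len describe a mapping the caller already holds; the cmd
 * values are the same ION_IOC_*_CACHES numbers the ioctl path passes
 * straight through to this function.
 *
 *	ret = ion_do_cache_op(client, handle, vaddr, 0, len,
 *			      ION_IOC_CLEAN_CACHES);
 *	if (ret < 0)
 *		return ret;
 */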
Laura Abbottabcb6f72011-10-04 16:26:49 -0700887
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700888static int ion_debug_client_show(struct seq_file *s, void *unused)
889{
890 struct ion_client *client = s->private;
891 struct rb_node *n;
Olav Haugan854c9e12012-05-16 16:34:28 -0700892 struct rb_node *n2;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700893
Olav Haugan854c9e12012-05-16 16:34:28 -0700894 seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
895 "heap_name", "size_in_bytes", "handle refcount",
896 "buffer", "physical", "[domain,partition] - virt");
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700897
898 mutex_lock(&client->lock);
899 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
900 struct ion_handle *handle = rb_entry(n, struct ion_handle,
901 node);
902 enum ion_heap_type type = handle->buffer->heap->type;
903
Olav Haugan854c9e12012-05-16 16:34:28 -0700904 seq_printf(s, "%16.16s: %16x : %16d : %12p",
Laura Abbott68c80642011-10-21 17:32:27 -0700905 handle->buffer->heap->name,
906 handle->buffer->size,
907 atomic_read(&handle->ref.refcount),
908 handle->buffer);
Olav Haugan854c9e12012-05-16 16:34:28 -0700909
910 if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
911 type == ION_HEAP_TYPE_CARVEOUT ||
912 type == ION_HEAP_TYPE_CP)
913 seq_printf(s, " : %12lx", handle->buffer->priv_phys);
914 else
915 seq_printf(s, " : %12s", "N/A");
916
917 for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
918 n2 = rb_next(n2)) {
919 struct ion_iommu_map *imap =
920 rb_entry(n2, struct ion_iommu_map, node);
921 seq_printf(s, " : [%d,%d] - %8lx",
922 imap->domain_info[DI_DOMAIN_NUM],
923 imap->domain_info[DI_PARTITION_NUM],
924 imap->iova_addr);
925 }
926 seq_printf(s, "\n");
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700927 }
928 mutex_unlock(&client->lock);
929
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700930 return 0;
931}
932
933static int ion_debug_client_open(struct inode *inode, struct file *file)
934{
935 return single_open(file, ion_debug_client_show, inode->i_private);
936}
937
938static const struct file_operations debug_client_fops = {
939 .open = ion_debug_client_open,
940 .read = seq_read,
941 .llseek = seq_lseek,
942 .release = single_release,
943};
944
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700945struct ion_client *ion_client_create(struct ion_device *dev,
946 unsigned int heap_mask,
947 const char *name)
948{
949 struct ion_client *client;
950 struct task_struct *task;
951 struct rb_node **p;
952 struct rb_node *parent = NULL;
953 struct ion_client *entry;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700954 pid_t pid;
Olav Haugane8a31972012-05-16 13:11:41 -0700955 unsigned int name_len;
956
957 if (!name) {
958 pr_err("%s: Name cannot be null\n", __func__);
959 return ERR_PTR(-EINVAL);
960 }
961 name_len = strnlen(name, 64);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700962
963 get_task_struct(current->group_leader);
964 task_lock(current->group_leader);
965 pid = task_pid_nr(current->group_leader);
966 /* don't bother to store task struct for kernel threads,
967 they can't be killed anyway */
968 if (current->group_leader->flags & PF_KTHREAD) {
969 put_task_struct(current->group_leader);
970 task = NULL;
971 } else {
972 task = current->group_leader;
973 }
974 task_unlock(current->group_leader);
975
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700976 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
977 if (!client) {
Laura Abbottb14ed962012-01-30 14:18:08 -0800978 if (task)
979 put_task_struct(current->group_leader);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700980 return ERR_PTR(-ENOMEM);
981 }
982
983 client->dev = dev;
984 client->handles = RB_ROOT;
985 mutex_init(&client->lock);
Olav Haugan63e5f3b2012-01-11 16:42:37 -0800986
Olav Haugan6625c7d12012-01-24 13:50:43 -0800987 client->name = kzalloc(name_len+1, GFP_KERNEL);
Olav Haugan63e5f3b2012-01-11 16:42:37 -0800988 if (!client->name) {
989 put_task_struct(current->group_leader);
990 kfree(client);
991 return ERR_PTR(-ENOMEM);
992 } else {
Olav Haugan6625c7d12012-01-24 13:50:43 -0800993 strlcpy(client->name, name, name_len+1);
Olav Haugan63e5f3b2012-01-11 16:42:37 -0800994 }
995
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700996 client->heap_mask = heap_mask;
997 client->task = task;
998 client->pid = pid;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700999
1000 mutex_lock(&dev->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -08001001 p = &dev->clients.rb_node;
1002 while (*p) {
1003 parent = *p;
1004 entry = rb_entry(parent, struct ion_client, node);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001005
Laura Abbottb14ed962012-01-30 14:18:08 -08001006 if (client < entry)
1007 p = &(*p)->rb_left;
1008 else if (client > entry)
1009 p = &(*p)->rb_right;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001010 }
Laura Abbottb14ed962012-01-30 14:18:08 -08001011 rb_link_node(&client->node, parent, p);
1012 rb_insert_color(&client->node, &dev->clients);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001013
Laura Abbotteed86032011-12-05 15:32:36 -08001014
1015 client->debug_root = debugfs_create_file(name, 0664,
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001016 dev->debug_root, client,
1017 &debug_client_fops);
1018 mutex_unlock(&dev->lock);
1019
1020 return client;
1021}
1022
Laura Abbottb14ed962012-01-30 14:18:08 -08001023void ion_client_destroy(struct ion_client *client)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001024{
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001025 struct ion_device *dev = client->dev;
1026 struct rb_node *n;
1027
1028 pr_debug("%s: %d\n", __func__, __LINE__);
1029 while ((n = rb_first(&client->handles))) {
1030 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1031 node);
1032 ion_handle_destroy(&handle->ref);
1033 }
1034 mutex_lock(&dev->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -08001035 if (client->task)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001036 put_task_struct(client->task);
Laura Abbottb14ed962012-01-30 14:18:08 -08001037 rb_erase(&client->node, &dev->clients);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001038 debugfs_remove_recursive(client->debug_root);
1039 mutex_unlock(&dev->lock);
1040
Olav Haugan63e5f3b2012-01-11 16:42:37 -08001041 kfree(client->name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001042 kfree(client);
1043}
Olav Hauganbd453a92012-07-05 14:21:34 -07001044EXPORT_SYMBOL(ion_client_destroy);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001045
Laura Abbott273dd8e2011-10-12 14:26:33 -07001046int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
1047 unsigned long *flags)
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001048{
1049 struct ion_buffer *buffer;
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001050
1051 mutex_lock(&client->lock);
1052 if (!ion_handle_validate(client, handle)) {
Laura Abbott273dd8e2011-10-12 14:26:33 -07001053 pr_err("%s: invalid handle passed to %s.\n",
1054 __func__, __func__);
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001055 mutex_unlock(&client->lock);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001056 return -EINVAL;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001057 }
Laura Abbott273dd8e2011-10-12 14:26:33 -07001058 buffer = handle->buffer;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001059 mutex_lock(&buffer->lock);
Laura Abbott273dd8e2011-10-12 14:26:33 -07001060 *flags = buffer->flags;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001061 mutex_unlock(&buffer->lock);
Laura Abbott273dd8e2011-10-12 14:26:33 -07001062 mutex_unlock(&client->lock);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001063
Laura Abbott273dd8e2011-10-12 14:26:33 -07001064 return 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001065}
Laura Abbott273dd8e2011-10-12 14:26:33 -07001066EXPORT_SYMBOL(ion_handle_get_flags);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001067
Laura Abbott8c017362011-09-22 20:59:12 -07001068int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
1069 unsigned long *size)
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001070{
Laura Abbott8c017362011-09-22 20:59:12 -07001071 struct ion_buffer *buffer;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001072
Laura Abbott8c017362011-09-22 20:59:12 -07001073 mutex_lock(&client->lock);
1074 if (!ion_handle_validate(client, handle)) {
1075 pr_err("%s: invalid handle passed to %s.\n",
1076 __func__, __func__);
1077 mutex_unlock(&client->lock);
1078 return -EINVAL;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001079 }
Laura Abbott8c017362011-09-22 20:59:12 -07001080 buffer = handle->buffer;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001081 mutex_lock(&buffer->lock);
Laura Abbott8c017362011-09-22 20:59:12 -07001082 *size = buffer->size;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001083 mutex_unlock(&buffer->lock);
Laura Abbott8c017362011-09-22 20:59:12 -07001084 mutex_unlock(&client->lock);
1085
1086 return 0;
1087}
1088EXPORT_SYMBOL(ion_handle_get_size);
1089
Laura Abbottb14ed962012-01-30 14:18:08 -08001090struct sg_table *ion_sg_table(struct ion_client *client,
1091 struct ion_handle *handle)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001092{
Laura Abbottb14ed962012-01-30 14:18:08 -08001093 struct ion_buffer *buffer;
1094 struct sg_table *table;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001095
Laura Abbottb14ed962012-01-30 14:18:08 -08001096 mutex_lock(&client->lock);
1097 if (!ion_handle_validate(client, handle)) {
1098 pr_err("%s: invalid handle passed to map_dma.\n",
1099 __func__);
1100 mutex_unlock(&client->lock);
1101 return ERR_PTR(-EINVAL);
1102 }
1103 buffer = handle->buffer;
1104 table = buffer->sg_table;
1105 mutex_unlock(&client->lock);
1106 return table;
1107}
Olav Hauganbd453a92012-07-05 14:21:34 -07001108EXPORT_SYMBOL(ion_sg_table);
Laura Abbottb14ed962012-01-30 14:18:08 -08001109
1110static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1111 enum dma_data_direction direction)
1112{
1113 struct dma_buf *dmabuf = attachment->dmabuf;
1114 struct ion_buffer *buffer = dmabuf->priv;
1115
1116 return buffer->sg_table;
1117}
1118
1119static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1120 struct sg_table *table,
1121 enum dma_data_direction direction)
1122{
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001123}
1124
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001125static void ion_vma_open(struct vm_area_struct *vma)
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001126{
Laura Abbottb14ed962012-01-30 14:18:08 -08001127 struct ion_buffer *buffer = vma->vm_private_data;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001128
1129 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001130
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001131 mutex_lock(&buffer->lock);
Laura Abbott77168502011-12-05 11:06:24 -08001132 buffer->umap_cnt++;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001133 mutex_unlock(&buffer->lock);
1134}
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001135
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001136static void ion_vma_close(struct vm_area_struct *vma)
1137{
Laura Abbottb14ed962012-01-30 14:18:08 -08001138 struct ion_buffer *buffer = vma->vm_private_data;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001139
1140 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001141
Laura Abbott77168502011-12-05 11:06:24 -08001142 mutex_lock(&buffer->lock);
1143 buffer->umap_cnt--;
1144 mutex_unlock(&buffer->lock);
Laura Abbotta6835092011-11-14 15:27:02 -08001145
1146 if (buffer->heap->ops->unmap_user)
1147 buffer->heap->ops->unmap_user(buffer->heap, buffer);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001148}
1149
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001150static struct vm_operations_struct ion_vm_ops = {
1151 .open = ion_vma_open,
1152 .close = ion_vma_close,
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001153};
1154
Laura Abbottb14ed962012-01-30 14:18:08 -08001155static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001156{
Laura Abbottb14ed962012-01-30 14:18:08 -08001157 struct ion_buffer *buffer = dmabuf->priv;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001158 int ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001159
Laura Abbottb14ed962012-01-30 14:18:08 -08001160 if (!buffer->heap->ops->map_user) {
1161 pr_err("%s: this heap does not define a method for mapping "
1162 "to userspace\n", __func__);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001163 return -EINVAL;
1164 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001165
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001166 mutex_lock(&buffer->lock);
1167 /* now map it to userspace */
Laura Abbottb14ed962012-01-30 14:18:08 -08001168 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
Laura Abbotte8bc7aa2011-12-09 14:49:33 -08001169
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001170 if (ret) {
Laura Abbottb14ed962012-01-30 14:18:08 -08001171 mutex_unlock(&buffer->lock);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001172 pr_err("%s: failure mapping buffer to userspace\n",
1173 __func__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001174 } else {
1175 buffer->umap_cnt++;
1176 mutex_unlock(&buffer->lock);
1177
1178 vma->vm_ops = &ion_vm_ops;
1179 /*
1180 * move the buffer into the vm_private_data so we can access it
1181 * from vma_open/close
1182 */
1183 vma->vm_private_data = buffer;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001184 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001185 return ret;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001186}
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001187
Laura Abbottb14ed962012-01-30 14:18:08 -08001188static void ion_dma_buf_release(struct dma_buf *dmabuf)
1189{
1190 struct ion_buffer *buffer = dmabuf->priv;
1191 ion_buffer_put(buffer);
1192}
1193
1194static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1195{
1196 struct ion_buffer *buffer = dmabuf->priv;
1197 return buffer->vaddr + offset;
1198}
1199
1200static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1201 void *ptr)
1202{
1203 return;
1204}
1205
1206static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1207 size_t len,
1208 enum dma_data_direction direction)
1209{
1210 struct ion_buffer *buffer = dmabuf->priv;
1211 void *vaddr;
1212
1213 if (!buffer->heap->ops->map_kernel) {
1214 pr_err("%s: map kernel is not implemented by this heap.\n",
1215 __func__);
1216 return -ENODEV;
1217 }
1218
1219 mutex_lock(&buffer->lock);
1220 vaddr = ion_buffer_kmap_get(buffer);
1221 mutex_unlock(&buffer->lock);
1222 if (IS_ERR(vaddr))
1223 return PTR_ERR(vaddr);
1224 if (!vaddr)
1225 return -ENOMEM;
1226 return 0;
1227}
1228
1229static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1230 size_t len,
1231 enum dma_data_direction direction)
1232{
1233 struct ion_buffer *buffer = dmabuf->priv;
1234
1235 mutex_lock(&buffer->lock);
1236 ion_buffer_kmap_put(buffer);
1237 mutex_unlock(&buffer->lock);
1238}
1239
1240struct dma_buf_ops dma_buf_ops = {
1241 .map_dma_buf = ion_map_dma_buf,
1242 .unmap_dma_buf = ion_unmap_dma_buf,
1243 .mmap = ion_mmap,
1244 .release = ion_dma_buf_release,
1245 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1246 .end_cpu_access = ion_dma_buf_end_cpu_access,
1247 .kmap_atomic = ion_dma_buf_kmap,
1248 .kunmap_atomic = ion_dma_buf_kunmap,
1249 .kmap = ion_dma_buf_kmap,
1250 .kunmap = ion_dma_buf_kunmap,
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001251};
1252
Laura Abbottb14ed962012-01-30 14:18:08 -08001253static int ion_share_set_flags(struct ion_client *client,
1254 struct ion_handle *handle,
1255 unsigned long flags)
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001256{
Laura Abbottb14ed962012-01-30 14:18:08 -08001257 struct ion_buffer *buffer;
1258 bool valid_handle;
1259 unsigned long ion_flags = ION_SET_CACHE(CACHED);
1260 if (flags & O_DSYNC)
1261 ion_flags = ION_SET_CACHE(UNCACHED);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001262
Laura Abbottb14ed962012-01-30 14:18:08 -08001263 mutex_lock(&client->lock);
1264 valid_handle = ion_handle_validate(client, handle);
1265 mutex_unlock(&client->lock);
1266 if (!valid_handle) {
1267 WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
1268 return -EINVAL;
1269 }
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001270
Laura Abbottb14ed962012-01-30 14:18:08 -08001271 buffer = handle->buffer;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001272
Laura Abbottb14ed962012-01-30 14:18:08 -08001273 mutex_lock(&buffer->lock);
1274 if (ion_validate_buffer_flags(buffer, ion_flags)) {
1275 mutex_unlock(&buffer->lock);
1276 return -EEXIST;
1277 }
1278 mutex_unlock(&buffer->lock);
1279 return 0;
1280}
Laura Abbott4b5d0482011-09-27 18:35:14 -07001281
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001282
Laura Abbottb14ed962012-01-30 14:18:08 -08001283int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
1284{
1285 struct ion_buffer *buffer;
1286 struct dma_buf *dmabuf;
1287 bool valid_handle;
1288 int fd;
1289
1290 mutex_lock(&client->lock);
1291 valid_handle = ion_handle_validate(client, handle);
1292 mutex_unlock(&client->lock);
1293 if (!valid_handle) {
Olav Haugan0df59942012-07-05 14:27:30 -07001294 WARN(1, "%s: invalid handle passed to share.\n", __func__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001295 return -EINVAL;
1296 }
1297
1298 buffer = handle->buffer;
1299 ion_buffer_get(buffer);
1300 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1301 if (IS_ERR(dmabuf)) {
1302 ion_buffer_put(buffer);
1303 return PTR_ERR(dmabuf);
1304 }
1305 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
Laura Abbottc2641f72012-08-01 18:06:18 -07001306 if (fd < 0)
Laura Abbottb14ed962012-01-30 14:18:08 -08001307 dma_buf_put(dmabuf);
Laura Abbottc2641f72012-08-01 18:06:18 -07001308
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001309 return fd;
Laura Abbottb14ed962012-01-30 14:18:08 -08001310}
Olav Hauganbd453a92012-07-05 14:21:34 -07001311EXPORT_SYMBOL(ion_share_dma_buf);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001312
Laura Abbottb14ed962012-01-30 14:18:08 -08001313struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1314{
1315 struct dma_buf *dmabuf;
1316 struct ion_buffer *buffer;
1317 struct ion_handle *handle;
1318
1319 dmabuf = dma_buf_get(fd);
1320 if (IS_ERR_OR_NULL(dmabuf))
1321 return ERR_PTR(PTR_ERR(dmabuf));
1322 /* if this memory came from ion */
1323
1324 if (dmabuf->ops != &dma_buf_ops) {
1325 pr_err("%s: can not import dmabuf from another exporter\n",
1326 __func__);
1327 dma_buf_put(dmabuf);
1328 return ERR_PTR(-EINVAL);
1329 }
1330 buffer = dmabuf->priv;
1331
1332 mutex_lock(&client->lock);
1333 /* if a handle exists for this buffer just take a reference to it */
1334 handle = ion_handle_lookup(client, buffer);
1335 if (!IS_ERR_OR_NULL(handle)) {
1336 ion_handle_get(handle);
1337 goto end;
1338 }
1339 handle = ion_handle_create(client, buffer);
1340 if (IS_ERR_OR_NULL(handle))
1341 goto end;
1342 ion_handle_add(client, handle);
1343end:
1344 mutex_unlock(&client->lock);
1345 dma_buf_put(dmabuf);
1346 return handle;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001347}
Olav Hauganbd453a92012-07-05 14:21:34 -07001348EXPORT_SYMBOL(ion_import_dma_buf);
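/*
 * Illustrative sketch (not part of the driver): sharing a buffer between two
 * clients through a dma-buf fd.  In practice the fd usually crosses a
 * process boundary (e.g. over binder) before being imported; the fd, and any
 * handle created from it, each keep the underlying ion_buffer alive.
 *
 *	struct ion_handle *imported;
 *	int fd;
 *
 *	fd = ion_share_dma_buf(client_a, handle_a);
 *	if (fd < 0)
 *		return fd;
 *
 *	imported = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR_OR_NULL(imported))
 *		return PTR_ERR(imported);
 *
 *	...use the buffer through client_b, then ion_free(client_b, imported)
 *	   and close the fd when it is no longer needed...
 */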
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001349
1350static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1351{
1352 struct ion_client *client = filp->private_data;
1353
1354 switch (cmd) {
1355 case ION_IOC_ALLOC:
1356 {
1357 struct ion_allocation_data data;
1358
1359 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1360 return -EFAULT;
1361 data.handle = ion_alloc(client, data.len, data.align,
1362 data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001363
Laura Abbottb14ed962012-01-30 14:18:08 -08001364 if (IS_ERR(data.handle))
1365 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001366
Laura Abbottb14ed962012-01-30 14:18:08 -08001367 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1368 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001369 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001370 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001371 break;
1372 }
1373 case ION_IOC_FREE:
1374 {
1375 struct ion_handle_data data;
1376 bool valid;
1377
1378 if (copy_from_user(&data, (void __user *)arg,
1379 sizeof(struct ion_handle_data)))
1380 return -EFAULT;
1381 mutex_lock(&client->lock);
1382 valid = ion_handle_validate(client, data.handle);
1383 mutex_unlock(&client->lock);
1384 if (!valid)
1385 return -EINVAL;
1386 ion_free(client, data.handle);
1387 break;
1388 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001389 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001390 case ION_IOC_SHARE:
1391 {
1392 struct ion_fd_data data;
Laura Abbottb14ed962012-01-30 14:18:08 -08001393 int ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001394 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1395 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001396
1397 ret = ion_share_set_flags(client, data.handle, filp->f_flags);
1398 if (ret)
1399 return ret;
1400
1401 data.fd = ion_share_dma_buf(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001402 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1403 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001404 if (data.fd < 0)
1405 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001406 break;
1407 }
1408 case ION_IOC_IMPORT:
1409 {
1410 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001411 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001412 if (copy_from_user(&data, (void __user *)arg,
1413 sizeof(struct ion_fd_data)))
1414 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001415 data.handle = ion_import_dma_buf(client, data.fd);
1416 if (IS_ERR(data.handle))
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001417 data.handle = NULL;
1418 if (copy_to_user((void __user *)arg, &data,
1419 sizeof(struct ion_fd_data)))
1420 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001421 if (ret < 0)
1422 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001423 break;
1424 }
1425 case ION_IOC_CUSTOM:
1426 {
1427 struct ion_device *dev = client->dev;
1428 struct ion_custom_data data;
1429
1430 if (!dev->custom_ioctl)
1431 return -ENOTTY;
1432 if (copy_from_user(&data, (void __user *)arg,
1433 sizeof(struct ion_custom_data)))
1434 return -EFAULT;
1435 return dev->custom_ioctl(client, data.cmd, data.arg);
1436 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001437 case ION_IOC_CLEAN_CACHES:
1438 case ION_IOC_INV_CACHES:
1439 case ION_IOC_CLEAN_INV_CACHES:
1440 {
1441 struct ion_flush_data data;
Laura Abbott9fa29e82011-11-14 09:42:53 -08001442 unsigned long start, end;
Laura Abbotte80ea012011-11-18 18:36:47 -08001443 struct ion_handle *handle = NULL;
1444 int ret;
Laura Abbottabcb6f72011-10-04 16:26:49 -07001445
1446 if (copy_from_user(&data, (void __user *)arg,
1447 sizeof(struct ion_flush_data)))
1448 return -EFAULT;
1449
Laura Abbott9fa29e82011-11-14 09:42:53 -08001450 start = (unsigned long) data.vaddr;
1451 end = (unsigned long) data.vaddr + data.length;
1452
1453 if (check_vaddr_bounds(start, end)) {
1454 pr_err("%s: virtual address %p is out of bounds\n",
1455 __func__, data.vaddr);
1456 return -EINVAL;
1457 }
1458
Laura Abbotte80ea012011-11-18 18:36:47 -08001459 if (!data.handle) {
Laura Abbottb14ed962012-01-30 14:18:08 -08001460 handle = ion_import_dma_buf(client, data.fd);
1461 if (IS_ERR(handle)) {
Laura Abbotte80ea012011-11-18 18:36:47 -08001462 pr_info("%s: Could not import handle: %ld\n",
1463 __func__, PTR_ERR(handle));
1464 return -EINVAL;
1465 }
1466 }
1467
1468 ret = ion_do_cache_op(client,
1469 data.handle ? data.handle : handle,
1470 data.vaddr, data.offset, data.length,
1471 cmd);
1472
1473 if (!data.handle)
1474 ion_free(client, handle);
1475
Olav Haugand7baec02012-05-15 14:38:09 -07001476 if (ret < 0)
1477 return ret;
Laura Abbotte80ea012011-11-18 18:36:47 -08001478 break;
Laura Abbottabcb6f72011-10-04 16:26:49 -07001479
1480 }
Laura Abbott273dd8e2011-10-12 14:26:33 -07001481 case ION_IOC_GET_FLAGS:
1482 {
1483 struct ion_flag_data data;
1484 int ret;
1485 if (copy_from_user(&data, (void __user *)arg,
1486 sizeof(struct ion_flag_data)))
1487 return -EFAULT;
1488
1489 ret = ion_handle_get_flags(client, data.handle, &data.flags);
1490 if (ret < 0)
1491 return ret;
1492 if (copy_to_user((void __user *)arg, &data,
1493 sizeof(struct ion_flag_data)))
1494 return -EFAULT;
1495 break;
1496 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001497 default:
1498 return -ENOTTY;
1499 }
1500 return 0;
1501}
1502
1503static int ion_release(struct inode *inode, struct file *file)
1504{
1505 struct ion_client *client = file->private_data;
1506
1507 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001508 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001509 return 0;
1510}
1511
1512static int ion_open(struct inode *inode, struct file *file)
1513{
1514 struct miscdevice *miscdev = file->private_data;
1515 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1516 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001517 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001518
1519 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001520 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1521 client = ion_client_create(dev, -1, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001522 if (IS_ERR_OR_NULL(client))
1523 return PTR_ERR(client);
1524 file->private_data = client;
1525
1526 return 0;
1527}
1528
1529static const struct file_operations ion_fops = {
1530 .owner = THIS_MODULE,
1531 .open = ion_open,
1532 .release = ion_release,
1533 .unlocked_ioctl = ion_ioctl,
1534};
1535
1536static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001537 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001538{
1539 size_t size = 0;
1540 struct rb_node *n;
1541
1542 mutex_lock(&client->lock);
1543 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1544 struct ion_handle *handle = rb_entry(n,
1545 struct ion_handle,
1546 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001547 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001548 size += handle->buffer->size;
1549 }
1550 mutex_unlock(&client->lock);
1551 return size;
1552}
1553
Olav Haugan0671b9a2012-05-25 11:58:56 -07001554/**
1555 * Searches through a client's handles to determine whether the buffer
1556 * is owned by this client. Used for debug output.
1557 * @param client pointer to candidate owner of buffer
1558 * @param buf pointer to buffer that we are trying to find the owner of
1559 * @return 1 if found, 0 otherwise
1560 */
1561static int ion_debug_find_buffer_owner(const struct ion_client *client,
1562 const struct ion_buffer *buf)
1563{
1564 struct rb_node *n;
1565
1566 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1567 const struct ion_handle *handle = rb_entry(n,
1568 const struct ion_handle,
1569 node);
1570 if (handle->buffer == buf)
1571 return 1;
1572 }
1573 return 0;
1574}
1575
1576/**
1577 * Adds a mem_map_data entry to the mem_map tree, keyed by start address.
1578 * Used for debug output.
1579 * @param mem_map The mem_map tree
1580 * @param data The new data to add to the tree
1581 */
1582static void ion_debug_mem_map_add(struct rb_root *mem_map,
1583 struct mem_map_data *data)
1584{
1585 struct rb_node **p = &mem_map->rb_node;
1586 struct rb_node *parent = NULL;
1587 struct mem_map_data *entry;
1588
1589 while (*p) {
1590 parent = *p;
1591 entry = rb_entry(parent, struct mem_map_data, node);
1592
1593 if (data->addr < entry->addr) {
1594 p = &(*p)->rb_left;
1595 } else if (data->addr > entry->addr) {
1596 p = &(*p)->rb_right;
1597 } else {
1598			pr_err("%s: mem_map_data already found.\n", __func__);
1599 BUG();
1600 }
1601 }
1602 rb_link_node(&data->node, parent, p);
1603 rb_insert_color(&data->node, mem_map);
1604}
1605
1606/**
1607 * Search for an owner of a buffer by iterating over all ION clients.
1608 * @param dev ion device containing pointers to all the clients.
1609 * @param buffer pointer to buffer we are trying to find the owner of.
1610 * @return name of the owning client, or NULL if no client owns the buffer.
1611 */
1612const char *ion_debug_locate_owner(const struct ion_device *dev,
1613 const struct ion_buffer *buffer)
1614{
1615 struct rb_node *j;
1616 const char *client_name = NULL;
1617
Laura Abbottb14ed962012-01-30 14:18:08 -08001618 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001619 j = rb_next(j)) {
1620 struct ion_client *client = rb_entry(j, struct ion_client,
1621 node);
1622 if (ion_debug_find_buffer_owner(client, buffer))
1623 client_name = client->name;
1624 }
1625 return client_name;
1626}
1627
1628/**
1629 * Create a mem_map of the heap.
1630 * @param s seq_file to log error message to.
1631 * @param heap The heap to create mem_map for.
1632 * @param mem_map The mem map to be created.
1633 */
1634void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1635 struct rb_root *mem_map)
1636{
1637 struct ion_device *dev = heap->dev;
1638 struct rb_node *n;
1639
1640 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1641 struct ion_buffer *buffer =
1642 rb_entry(n, struct ion_buffer, node);
1643 if (buffer->heap->id == heap->id) {
1644 struct mem_map_data *data =
1645 kzalloc(sizeof(*data), GFP_KERNEL);
1646 if (!data) {
1647 seq_printf(s, "ERROR: out of memory. "
1648 "Part of memory map will not be logged\n");
1649 break;
1650 }
1651 data->addr = buffer->priv_phys;
1652 data->addr_end = buffer->priv_phys + buffer->size-1;
1653 data->size = buffer->size;
1654 data->client_name = ion_debug_locate_owner(dev, buffer);
1655 ion_debug_mem_map_add(mem_map, data);
1656 }
1657 }
1658}
1659
1660/**
1661 * Free the memory allocated by ion_debug_mem_map_create
1662 * @param mem_map The mem map to free.
1663 */
1664static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1665{
1666 if (mem_map) {
1667 struct rb_node *n;
1668 while ((n = rb_first(mem_map)) != 0) {
1669 struct mem_map_data *data =
1670 rb_entry(n, struct mem_map_data, node);
1671 rb_erase(&data->node, mem_map);
1672 kfree(data);
1673 }
1674 }
1675}
1676
1677/**
1678 * Print heap debug information.
1679 * @param s seq_file to log message to.
1680 * @param heap pointer to heap that we will print debug information for.
1681 */
1682static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1683{
1684 if (heap->ops->print_debug) {
1685 struct rb_root mem_map = RB_ROOT;
1686 ion_debug_mem_map_create(s, heap, &mem_map);
1687 heap->ops->print_debug(heap, s, &mem_map);
1688 ion_debug_mem_map_destroy(&mem_map);
1689 }
1690}
1691
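/*
 * Illustrative sketch only, not wired into any heap: a helper like the one
 * below is roughly what a heap's print_debug handler does with the mem_map
 * built by ion_debug_mem_map_create().  It walks the rbtree in ascending
 * address order and prints one line per allocation.  The exact types of the
 * mem_map_data fields are only assumed here, so values are cast to
 * unsigned long before printing.
 */
static void __maybe_unused example_dump_mem_map(struct seq_file *s,
						struct rb_root *mem_map)
{
	struct rb_node *n;

	seq_printf(s, "%16s %14s %14s %14s\n",
		   "client", "start", "end", "size");
	for (n = rb_first(mem_map); n; n = rb_next(n)) {
		struct mem_map_data *data =
			rb_entry(n, struct mem_map_data, node);
		const char *client_name = data->client_name ?
			data->client_name : "(no owner)";

		seq_printf(s, "%16s %14lx %14lx %14lu\n", client_name,
			   (unsigned long) data->addr,
			   (unsigned long) data->addr_end,
			   (unsigned long) data->size);
	}
}
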
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001692static int ion_debug_heap_show(struct seq_file *s, void *unused)
1693{
1694 struct ion_heap *heap = s->private;
1695 struct ion_device *dev = heap->dev;
1696 struct rb_node *n;
1697
Olav Haugane4900b52012-05-25 11:58:03 -07001698 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001699 seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001700
Laura Abbottb14ed962012-01-30 14:18:08 -08001701 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001702 struct ion_client *client = rb_entry(n, struct ion_client,
1703 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001704 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001705 if (!size)
1706 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001707 if (client->task) {
1708 char task_comm[TASK_COMM_LEN];
1709
1710 get_task_comm(task_comm, client->task);
1711 seq_printf(s, "%16.s %16u %16u\n", task_comm,
1712 client->pid, size);
1713 } else {
1714 seq_printf(s, "%16.s %16u %16u\n", client->name,
1715 client->pid, size);
1716 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001717 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001718 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001719 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001720 return 0;
1721}
1722
1723static int ion_debug_heap_open(struct inode *inode, struct file *file)
1724{
1725 return single_open(file, ion_debug_heap_show, inode->i_private);
1726}
1727
1728static const struct file_operations debug_heap_fops = {
1729 .open = ion_debug_heap_open,
1730 .read = seq_read,
1731 .llseek = seq_lseek,
1732 .release = single_release,
1733};
1734
1735void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1736{
1737 struct rb_node **p = &dev->heaps.rb_node;
1738 struct rb_node *parent = NULL;
1739 struct ion_heap *entry;
1740
Laura Abbottb14ed962012-01-30 14:18:08 -08001741 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1742 !heap->ops->unmap_dma)
1743		pr_err("%s: cannot add heap with invalid ops struct.\n",
1744 __func__);
1745
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001746 heap->dev = dev;
1747 mutex_lock(&dev->lock);
1748 while (*p) {
1749 parent = *p;
1750 entry = rb_entry(parent, struct ion_heap, node);
1751
1752 if (heap->id < entry->id) {
1753 p = &(*p)->rb_left;
1754		} else if (heap->id > entry->id) {
1755 p = &(*p)->rb_right;
1756 } else {
1757			pr_err("%s: cannot insert multiple heaps with id %d\n",
1758			       __func__, heap->id);
1759 goto end;
1760 }
1761 }
1762
1763 rb_link_node(&heap->node, parent, p);
1764 rb_insert_color(&heap->node, &dev->heaps);
1765 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1766 &debug_heap_fops);
1767end:
1768 mutex_unlock(&dev->lock);
1769}
1770
Laura Abbott7e446482012-06-13 15:59:39 -07001771int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1772 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001773{
1774 struct rb_node *n;
1775 int ret_val = 0;
1776
1777 /*
1778 * traverse the list of heaps available in this system
1779 * and find the heap that is specified.
1780 */
1781 mutex_lock(&dev->lock);
1782 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1783 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1784 if (heap->type != ION_HEAP_TYPE_CP)
1785 continue;
1786 if (ION_HEAP(heap->id) != heap_id)
1787 continue;
1788 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001789 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001790 else
1791 ret_val = -EINVAL;
1792 break;
1793 }
1794 mutex_unlock(&dev->lock);
1795 return ret_val;
1796}
Olav Hauganbd453a92012-07-05 14:21:34 -07001797EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001798
Laura Abbott7e446482012-06-13 15:59:39 -07001799int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1800 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001801{
1802 struct rb_node *n;
1803 int ret_val = 0;
1804
1805 /*
1806 * traverse the list of heaps available in this system
1807 * and find the heap that is specified.
1808 */
1809 mutex_lock(&dev->lock);
1810 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1811 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1812 if (heap->type != ION_HEAP_TYPE_CP)
1813 continue;
1814 if (ION_HEAP(heap->id) != heap_id)
1815 continue;
1816		if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001817 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001818 else
1819 ret_val = -EINVAL;
1820 break;
1821 }
1822 mutex_unlock(&dev->lock);
1823 return ret_val;
1824}
Olav Hauganbd453a92012-07-05 14:21:34 -07001825EXPORT_SYMBOL(ion_unsecure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001826
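/*
 * Illustrative sketch only: callers of the two helpers above normally
 * bracket a protected playback or recording session with a secure/unsecure
 * pair on the same content-protection heap.  The heap id parameter and the
 * version value 1 are placeholders for whatever the target defines, and the
 * opaque data argument is simply passed through to the heap's
 * secure_heap/unsecure_heap ops (NULL here).
 */
static int __maybe_unused example_protected_session(struct ion_device *idev,
						     int cp_heap_id)
{
	int ret;

	ret = ion_secure_heap(idev, ION_HEAP(cp_heap_id), 1, NULL);
	if (ret)
		return ret;

	/* ... allocate from the CP heap and run the protected use case ... */

	return ion_unsecure_heap(idev, ION_HEAP(cp_heap_id), 1, NULL);
}
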
Laura Abbott404f8242011-10-31 14:22:53 -07001827static int ion_debug_leak_show(struct seq_file *s, void *unused)
1828{
1829 struct ion_device *dev = s->private;
1830 struct rb_node *n;
1831 struct rb_node *n2;
1832
1833 /* mark all buffers as 1 */
1834 seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size",
1835 "ref cnt");
1836 mutex_lock(&dev->lock);
1837 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1838 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1839 node);
1840
1841 buf->marked = 1;
1842 }
1843
1844 /* now see which buffers we can access */
Laura Abbottb14ed962012-01-30 14:18:08 -08001845 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Laura Abbott404f8242011-10-31 14:22:53 -07001846 struct ion_client *client = rb_entry(n, struct ion_client,
1847 node);
1848
1849 mutex_lock(&client->lock);
1850 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1851 struct ion_handle *handle = rb_entry(n2,
1852 struct ion_handle, node);
1853
1854 handle->buffer->marked = 0;
1855
1856 }
1857 mutex_unlock(&client->lock);
1858
1859 }
1860
Laura Abbott404f8242011-10-31 14:22:53 -07001861 /* And anyone still marked as a 1 means a leaked handle somewhere */
1862 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1863 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1864 node);
1865
1866 if (buf->marked == 1)
1867 seq_printf(s, "%16.x %16.s %16.x %16.d\n",
1868 (int)buf, buf->heap->name, buf->size,
1869 atomic_read(&buf->ref.refcount));
1870 }
1871 mutex_unlock(&dev->lock);
1872 return 0;
1873}
1874
1875static int ion_debug_leak_open(struct inode *inode, struct file *file)
1876{
1877 return single_open(file, ion_debug_leak_show, inode->i_private);
1878}
1879
1880static const struct file_operations debug_leak_fops = {
1881 .open = ion_debug_leak_open,
1882 .read = seq_read,
1883 .llseek = seq_lseek,
1884 .release = single_release,
1885};
1886
1887
1888
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001889struct ion_device *ion_device_create(long (*custom_ioctl)
1890 (struct ion_client *client,
1891 unsigned int cmd,
1892 unsigned long arg))
1893{
1894 struct ion_device *idev;
1895 int ret;
1896
1897 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1898 if (!idev)
1899 return ERR_PTR(-ENOMEM);
1900
1901 idev->dev.minor = MISC_DYNAMIC_MINOR;
1902 idev->dev.name = "ion";
1903 idev->dev.fops = &ion_fops;
1904 idev->dev.parent = NULL;
1905 ret = misc_register(&idev->dev);
1906 if (ret) {
1907		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
1908		return ERR_PTR(ret);
1909 }
1910
1911 idev->debug_root = debugfs_create_dir("ion", NULL);
1912 if (IS_ERR_OR_NULL(idev->debug_root))
1913 pr_err("ion: failed to create debug files.\n");
1914
1915 idev->custom_ioctl = custom_ioctl;
1916 idev->buffers = RB_ROOT;
1917 mutex_init(&idev->lock);
1918 idev->heaps = RB_ROOT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001919 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001920 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1921 &debug_leak_fops);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001922 return idev;
1923}
1924
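/*
 * Illustrative sketch only: roughly how platform code brings the device up.
 * It assumes a board-supplied ion_platform_data table and the
 * ion_heap_create() helper declared in ion_priv.h; no custom ioctl handler
 * is installed and error handling is kept minimal.
 */
static struct ion_device *__maybe_unused
example_ion_probe(struct ion_platform_data *pdata)
{
	struct ion_device *idev;
	int i;

	idev = ion_device_create(NULL);
	if (IS_ERR_OR_NULL(idev))
		return idev;

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (IS_ERR_OR_NULL(heap)) {
			pr_err("%s: could not create heap %s\n",
			       __func__, pdata->heaps[i].name);
			continue;
		}
		ion_device_add_heap(idev, heap);
	}
	return idev;
}
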
1925void ion_device_destroy(struct ion_device *dev)
1926{
1927 misc_deregister(&dev->dev);
1928 /* XXX need to free the heaps and clients ? */
1929 kfree(dev);
1930}
Laura Abbottb14ed962012-01-30 14:18:08 -08001931
1932void __init ion_reserve(struct ion_platform_data *data)
1933{
1934 int i, ret;
1935
1936 for (i = 0; i < data->nr; i++) {
1937 if (data->heaps[i].size == 0)
1938 continue;
1939 ret = memblock_reserve(data->heaps[i].base,
1940 data->heaps[i].size);
1941 if (ret)
1942 pr_err("memblock reserve of %x@%lx failed\n",
1943 data->heaps[i].size,
1944 data->heaps[i].base);
1945 }
1946}
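
/*
 * Illustrative sketch only: ion_reserve() is intended to run from the
 * machine's memory-reserve hook, before the page allocator owns the memory,
 * so that heaps with a fixed base survive boot.  The pdata pointer below
 * stands in for whatever ion_platform_data table the board file builds;
 * its contents are not shown here.
 */
static struct ion_platform_data *example_ion_pdata; /* set up by board code */

static void __init __maybe_unused example_board_ion_reserve(void)
{
	if (example_ion_pdata)
		ion_reserve(example_ion_pdata);
}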