/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers, heaps and clients trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	hook for device-specific ioctls
 * @clients:		an rb tree of all the clients in the system
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped for iommu
 *
 * Modifications to node, kmap_cnt or iommu_map_cnt should be protected
 * by the lock in the client. Other fields are never changed after
 * initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

static void ion_iommu_release(struct kref *kref);

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d and partition %d\n",
				__func__, buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);
}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
					      unsigned int domain_no,
					      unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;

	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}
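
/*
 * Example of the key packing used by the lookup above: a mapping for
 * domain 1, partition 2 is stored under
 *
 *	key = ((uint64_t)1 << 32) | 2	== 0x0000000100000002
 *
 * so all partitions of a given domain sort adjacently in the
 * iommu_maps tree.
 */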

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

/*
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != NULL) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
	if (buffer->heap->ops->unsecure_buffer)
		buffer->heap->ops->unsecure_buffer(buffer, 1);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_delayed_unsecure(buffer);
	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client,
			   struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it. Repeat until
	 * allocation has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    !ion_heap_allow_secure_allocation(heap->type))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_mask, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
						heap_mask, flags,
						PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_mask, flags,
					    PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: 0x%x) from heap(s) %sfor client %s with heap mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
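
/*
 * A minimal sketch of kernel-side usage (illustrative only: "idev" and
 * the masks are assumptions, and the id mask only equals the type mask
 * on platforms where heap ids mirror heap types):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, 1 << ION_HEAP_TYPE_SYSTEM,
 *				   "example");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	handle = ion_alloc(client, SZ_4K, SZ_4K,
 *			   1 << ION_HEAP_TYPE_SYSTEM, 0);
 *	if (IS_ERR_OR_NULL(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 *
 * Note the asymmetry visible in the loop above: ion_client_create()
 * takes a mask of heap *types* while ion_alloc() takes a mask of heap
 * *ids*.
 */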

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
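
/*
 * The helpers above reference count kernel mappings at two levels:
 * each handle counts its own maps (handle->kmap_cnt) while the buffer
 * counts the total across all handles (buffer->kmap_cnt). The heap's
 * map_kernel()/unmap_kernel() ops only run on the buffer's 0 <-> 1
 * transitions, so two clients mapping the same buffer share a single
 * vaddr and the second map is just a counter increment.
 */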

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return -EINVAL;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size %x\n",
			 __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			 buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			 iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		} else {
			ret = PTR_ERR(iommu_map);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);
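
/*
 * A sketch of a typical iommu mapping cycle (illustrative; the domain
 * and partition numbers are platform-specific assumptions):
 *
 *	unsigned long iova, buffer_size;
 *	int ret;
 *
 *	ret = ion_map_iommu(client, handle, domain_num, partition_num,
 *			    SZ_4K, 0, &iova, &buffer_size, 0,
 *			    ION_IOMMU_UNMAP_DELAYED);
 *	if (ret)
 *		return ret;
 *	...
 *	ion_unmap_iommu(client, handle, domain_num, partition_num);
 *
 * Passing 0 for iova_length maps the entire buffer. With
 * ION_IOMMU_UNMAP_DELAYED set, the mapping holds an extra reference
 * and is only torn down when the buffer itself is destroyed (see
 * ion_iommu_delayed_unmap() above).
 */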

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						 ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: client pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("%s: handle pointer is invalid\n", __func__);
		return;
	}
	if (IS_ERR_OR_NULL(handle->buffer)) {
		pr_err("%s: buffer pointer is invalid\n", __func__);
		return;
	}

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
		     domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_iommu);

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
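
/*
 * A sketch of a temporary kernel mapping (illustrative):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 *
 * Maps nest, so callers only need to balance each map_kernel with an
 * unmap_kernel; the underlying heap op runs on the first map and the
 * last unmap (see the kmap reference counting above).
 */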

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
		    void *uaddr, unsigned long offset, unsigned long len,
		    unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_do_cache_op);
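
/*
 * A sketch of a cache maintenance call (illustrative; the command
 * value is assumed from the msm_ion uapi, and the handle is assumed
 * to reference a cached buffer):
 *
 *	ret = ion_do_cache_op(client, handle, vaddr, 0, len,
 *			      ION_IOC_CLEAN_INV_CACHES);
 *
 * Uncached buffers short-circuit to success, as the check above shows.
 */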

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
		   "heap_name", "size_in_bytes", "handle refcount",
		   "buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
		    type == ION_HEAP_TYPE_CARVEOUT ||
		    type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
		     n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
				   imap->domain_info[DI_DOMAIN_NUM],
				   imap->domain_info[DI_PARTITION_NUM],
				   imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		if (task)
			put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

/**
 * ion_mark_dangling_buffers_locked() - Mark dangling buffers
 * @dev:	the ion device whose buffers will be searched
 *
 * Sets marked=1 for all known buffers associated with `dev' that no
 * longer have a handle pointing to them. dev->lock should be held
 * across a call to this function (and should only be unlocked after
 * checking for marked buffers).
 */
static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
{
	struct rb_node *n, *n2;
	/* mark all buffers as 1 */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle
				= rb_entry(n2, struct ion_handle, node);

			handle->buffer->marked = 0;
		}
		mutex_unlock(&client->lock);
	}
}

#ifdef CONFIG_ION_LEAK_CHECK
static u32 ion_debug_check_leaks_on_destroy;

static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	struct rb_node *n;
	int num_leaks = 0;

	if (!ion_debug_check_leaks_on_destroy)
		return 0;

	/* check for leaked buffers (those that no longer have a
	 * handle pointing to them) */
	ion_mark_dangling_buffers_locked(dev);

	/* Anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1) {
			pr_info("Leaked ion buffer at %p\n", buf);
			num_leaks++;
		}
	}
	return num_leaks;
}

static void setup_ion_leak_check(struct dentry *debug_root)
{
	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
			    &ion_debug_check_leaks_on_destroy);
}
#else
static int ion_check_for_and_print_leaks(struct ion_device *dev)
{
	return 0;
}

static void setup_ion_leak_check(struct dentry *debug_root)
{
}
#endif
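
/*
 * When CONFIG_ION_LEAK_CHECK is enabled, the check is armed at runtime
 * through debugfs, e.g. (the path assumes the default debugfs mount
 * and this driver's debug root):
 *
 *	echo 1 > /sys/kernel/debug/ion/check_leaks_on_destroy
 */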
| 1106 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1107 | void ion_client_destroy(struct ion_client *client) |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1108 | { |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1109 | struct ion_device *dev = client->dev; |
| 1110 | struct rb_node *n; |
Mitchel Humpherys | a75e4eb | 2012-12-14 16:12:23 -0800 | [diff] [blame] | 1111 | int num_leaks; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1112 | |
| 1113 | pr_debug("%s: %d\n", __func__, __LINE__); |
| 1114 | while ((n = rb_first(&client->handles))) { |
| 1115 | struct ion_handle *handle = rb_entry(n, struct ion_handle, |
| 1116 | node); |
| 1117 | ion_handle_destroy(&handle->ref); |
| 1118 | } |
| 1119 | mutex_lock(&dev->lock); |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1120 | if (client->task) |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1121 | put_task_struct(client->task); |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1122 | rb_erase(&client->node, &dev->clients); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1123 | debugfs_remove_recursive(client->debug_root); |
Mitchel Humpherys | a75e4eb | 2012-12-14 16:12:23 -0800 | [diff] [blame] | 1124 | |
| 1125 | num_leaks = ion_check_for_and_print_leaks(dev); |
| 1126 | |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1127 | mutex_unlock(&dev->lock); |
| 1128 | |
Mitchel Humpherys | a75e4eb | 2012-12-14 16:12:23 -0800 | [diff] [blame] | 1129 | if (num_leaks) { |
| 1130 | struct task_struct *current_task = current; |
| 1131 | char current_task_name[TASK_COMM_LEN]; |
| 1132 | get_task_comm(current_task_name, current_task); |
| 1133 | WARN(1, "%s: Detected %d leaked ion buffer%s.\n", |
| 1134 | __func__, num_leaks, num_leaks == 1 ? "" : "s"); |
| 1135 | pr_info("task name at time of leak: %s, pid: %d\n", |
| 1136 | current_task_name, current_task->pid); |
| 1137 | } |
| 1138 | |
Olav Haugan | 63e5f3b | 2012-01-11 16:42:37 -0800 | [diff] [blame] | 1139 | kfree(client->name); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1140 | kfree(client); |
| 1141 | } |
Olav Haugan | bd453a9 | 2012-07-05 14:21:34 -0700 | [diff] [blame] | 1142 | EXPORT_SYMBOL(ion_client_destroy); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1143 | |
Laura Abbott | 273dd8e | 2011-10-12 14:26:33 -0700 | [diff] [blame] | 1144 | int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle, |
| 1145 | unsigned long *flags) |
Rebecca Schultz Zavin | 46d7133 | 2012-05-07 16:06:32 -0700 | [diff] [blame] | 1146 | { |
| 1147 | struct ion_buffer *buffer; |
Rebecca Schultz Zavin | 46d7133 | 2012-05-07 16:06:32 -0700 | [diff] [blame] | 1148 | |
| 1149 | mutex_lock(&client->lock); |
| 1150 | if (!ion_handle_validate(client, handle)) { |
Laura Abbott | 273dd8e | 2011-10-12 14:26:33 -0700 | [diff] [blame] | 1151 | pr_err("%s: invalid handle passed to %s.\n", |
| 1152 | __func__, __func__); |
Rebecca Schultz Zavin | 46d7133 | 2012-05-07 16:06:32 -0700 | [diff] [blame] | 1153 | mutex_unlock(&client->lock); |
Rebecca Schultz Zavin | 043a614 | 2012-02-01 11:09:46 -0800 | [diff] [blame] | 1154 | return -EINVAL; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1155 | } |
Laura Abbott | 273dd8e | 2011-10-12 14:26:33 -0700 | [diff] [blame] | 1156 | buffer = handle->buffer; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1157 | mutex_lock(&buffer->lock); |
Laura Abbott | 273dd8e | 2011-10-12 14:26:33 -0700 | [diff] [blame] | 1158 | *flags = buffer->flags; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1159 | mutex_unlock(&buffer->lock); |
Laura Abbott | 273dd8e | 2011-10-12 14:26:33 -0700 | [diff] [blame] | 1160 | mutex_unlock(&client->lock); |
Rebecca Schultz Zavin | 043a614 | 2012-02-01 11:09:46 -0800 | [diff] [blame] | 1161 | |
Laura Abbott | 273dd8e | 2011-10-12 14:26:33 -0700 | [diff] [blame] | 1162 | return 0; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1163 | } |
Laura Abbott | 273dd8e | 2011-10-12 14:26:33 -0700 | [diff] [blame] | 1164 | EXPORT_SYMBOL(ion_handle_get_flags); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1165 | |
Laura Abbott | 8c01736 | 2011-09-22 20:59:12 -0700 | [diff] [blame] | 1166 | int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle, |
| 1167 | unsigned long *size) |
Rebecca Schultz Zavin | 043a614 | 2012-02-01 11:09:46 -0800 | [diff] [blame] | 1168 | { |
Laura Abbott | 8c01736 | 2011-09-22 20:59:12 -0700 | [diff] [blame] | 1169 | struct ion_buffer *buffer; |
Rebecca Schultz Zavin | 043a614 | 2012-02-01 11:09:46 -0800 | [diff] [blame] | 1170 | |
Laura Abbott | 8c01736 | 2011-09-22 20:59:12 -0700 | [diff] [blame] | 1171 | mutex_lock(&client->lock); |
| 1172 | if (!ion_handle_validate(client, handle)) { |
| 1173 | pr_err("%s: invalid handle passed.\n", __func__);
| 1175 | mutex_unlock(&client->lock); |
| 1176 | return -EINVAL; |
Rebecca Schultz Zavin | be4a1ee | 2012-04-26 20:44:10 -0700 | [diff] [blame] | 1177 | } |
Laura Abbott | 8c01736 | 2011-09-22 20:59:12 -0700 | [diff] [blame] | 1178 | buffer = handle->buffer; |
Rebecca Schultz Zavin | be4a1ee | 2012-04-26 20:44:10 -0700 | [diff] [blame] | 1179 | mutex_lock(&buffer->lock); |
Laura Abbott | 8c01736 | 2011-09-22 20:59:12 -0700 | [diff] [blame] | 1180 | *size = buffer->size; |
Rebecca Schultz Zavin | be4a1ee | 2012-04-26 20:44:10 -0700 | [diff] [blame] | 1181 | mutex_unlock(&buffer->lock); |
Laura Abbott | 8c01736 | 2011-09-22 20:59:12 -0700 | [diff] [blame] | 1182 | mutex_unlock(&client->lock); |
| 1183 | |
| 1184 | return 0; |
| 1185 | } |
| 1186 | EXPORT_SYMBOL(ion_handle_get_size); |
| 1187 | |
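/*
 * Illustrative sketch (not part of the driver): a kernel client that has
 * already obtained a handle, e.g. from ion_alloc() or ion_import_dma_buf(),
 * can query its properties with the two getters above. The "client" and
 * "handle" names here are assumptions.
 *
 *	unsigned long flags, size;
 *
 *	if (!ion_handle_get_flags(client, handle, &flags) &&
 *	    !ion_handle_get_size(client, handle, &size))
 *		pr_debug("buffer is %lu bytes, flags 0x%lx\n", size, flags);
 */
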
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1188 | struct sg_table *ion_sg_table(struct ion_client *client, |
| 1189 | struct ion_handle *handle) |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1190 | { |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1191 | struct ion_buffer *buffer; |
| 1192 | struct sg_table *table; |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1193 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1194 | mutex_lock(&client->lock); |
| 1195 | if (!ion_handle_validate(client, handle)) { |
| 1196 | pr_err("%s: invalid handle passed to map_dma.\n", |
| 1197 | __func__); |
| 1198 | mutex_unlock(&client->lock); |
| 1199 | return ERR_PTR(-EINVAL); |
| 1200 | } |
| 1201 | buffer = handle->buffer; |
| 1202 | table = buffer->sg_table; |
| 1203 | mutex_unlock(&client->lock); |
| 1204 | return table; |
| 1205 | } |
Olav Haugan | bd453a9 | 2012-07-05 14:21:34 -0700 | [diff] [blame] | 1206 | EXPORT_SYMBOL(ion_sg_table); |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1207 | |
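/*
 * Illustrative sketch: the table returned by ion_sg_table() can be walked
 * with the standard scatterlist iterator from <linux/scatterlist.h>. The
 * table remains owned by the buffer and must not be freed by the caller.
 *
 *	struct sg_table *table = ion_sg_table(client, handle);
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (!IS_ERR(table))
 *		for_each_sg(table->sgl, sg, table->nents, i)
 *			pr_debug("chunk %d: %u bytes\n", i, sg->length);
 */
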
| 1208 | static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, |
| 1209 | enum dma_data_direction direction) |
| 1210 | { |
| 1211 | struct dma_buf *dmabuf = attachment->dmabuf; |
| 1212 | struct ion_buffer *buffer = dmabuf->priv; |
| 1213 | |
| 1214 | return buffer->sg_table; |
| 1215 | } |
| 1216 | |
| 1217 | static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, |
| 1218 | struct sg_table *table, |
| 1219 | enum dma_data_direction direction) |
| 1220 | { |
Rebecca Schultz Zavin | 043a614 | 2012-02-01 11:09:46 -0800 | [diff] [blame] | 1221 | } |
| 1222 | |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1223 | static void ion_vma_close(struct vm_area_struct *vma) |
| 1224 | { |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1225 | struct ion_buffer *buffer = vma->vm_private_data; |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1226 | |
| 1227 | pr_debug("%s: %d\n", __func__, __LINE__); |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1228 | |
Laura Abbott | a683509 | 2011-11-14 15:27:02 -0800 | [diff] [blame] | 1229 | if (buffer->heap->ops->unmap_user) |
| 1230 | buffer->heap->ops->unmap_user(buffer->heap, buffer); |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1231 | } |
| 1232 | |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1233 | static const struct vm_operations_struct ion_vm_ops = {
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1234 | .close = ion_vma_close, |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1235 | }; |
| 1236 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1237 | static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1238 | { |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1239 | struct ion_buffer *buffer = dmabuf->priv; |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1240 | int ret; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1241 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1242 | if (!buffer->heap->ops->map_user) { |
| 1243 | pr_err("%s: this heap does not define a method for mapping to userspace\n",
| 1244 | __func__);
Rebecca Schultz Zavin | 043a614 | 2012-02-01 11:09:46 -0800 | [diff] [blame] | 1245 | return -EINVAL; |
| 1246 | } |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1247 | |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1248 | mutex_lock(&buffer->lock); |
| 1249 | /* now map it to userspace */ |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1250 | ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); |
Laura Abbott | e8bc7aa | 2011-12-09 14:49:33 -0800 | [diff] [blame] | 1251 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1252 | mutex_unlock(&buffer->lock);
| 1253 | 
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1254 | if (ret) {
| 1255 | pr_err("%s: failure mapping buffer to userspace\n",
| 1256 | __func__);
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1257 | } else {
| 1258 | |
| 1259 | vma->vm_ops = &ion_vm_ops; |
| 1260 | /* |
| 1261 | * stash the buffer in vm_private_data so ion_vma_close() can
| 1262 | * access it
| 1263 | */ |
| 1264 | vma->vm_private_data = buffer; |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1265 | } |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1266 | return ret; |
Rebecca Schultz Zavin | 043a614 | 2012-02-01 11:09:46 -0800 | [diff] [blame] | 1267 | } |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1268 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1269 | static void ion_dma_buf_release(struct dma_buf *dmabuf) |
| 1270 | { |
| 1271 | struct ion_buffer *buffer = dmabuf->priv; |
| 1272 | ion_buffer_put(buffer); |
| 1273 | } |
| 1274 | |
| 1275 | static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) |
| 1276 | { |
| 1277 | struct ion_buffer *buffer = dmabuf->priv; |
| 1278 | return buffer->vaddr + offset; |
| 1279 | } |
| 1280 | |
| 1281 | static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, |
| 1282 | void *ptr) |
| 1283 | { |
| 1285 | } |
| 1286 | |
| 1287 | static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, |
| 1288 | size_t len, |
| 1289 | enum dma_data_direction direction) |
| 1290 | { |
| 1291 | struct ion_buffer *buffer = dmabuf->priv; |
| 1292 | void *vaddr; |
| 1293 | |
| 1294 | if (!buffer->heap->ops->map_kernel) { |
| 1295 | pr_err("%s: map kernel is not implemented by this heap.\n", |
| 1296 | __func__); |
| 1297 | return -ENODEV; |
| 1298 | } |
| 1299 | |
| 1300 | mutex_lock(&buffer->lock); |
| 1301 | vaddr = ion_buffer_kmap_get(buffer); |
| 1302 | mutex_unlock(&buffer->lock); |
| 1303 | if (IS_ERR(vaddr)) |
| 1304 | return PTR_ERR(vaddr); |
| 1305 | if (!vaddr) |
| 1306 | return -ENOMEM; |
| 1307 | return 0; |
| 1308 | } |
| 1309 | |
| 1310 | static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, |
| 1311 | size_t len, |
| 1312 | enum dma_data_direction direction) |
| 1313 | { |
| 1314 | struct ion_buffer *buffer = dmabuf->priv; |
| 1315 | |
| 1316 | mutex_lock(&buffer->lock); |
| 1317 | ion_buffer_kmap_put(buffer); |
| 1318 | mutex_unlock(&buffer->lock); |
| 1319 | } |
| 1320 | |
| 1321 | static const struct dma_buf_ops dma_buf_ops = {
| 1322 | .map_dma_buf = ion_map_dma_buf, |
| 1323 | .unmap_dma_buf = ion_unmap_dma_buf, |
| 1324 | .mmap = ion_mmap, |
| 1325 | .release = ion_dma_buf_release, |
| 1326 | .begin_cpu_access = ion_dma_buf_begin_cpu_access, |
| 1327 | .end_cpu_access = ion_dma_buf_end_cpu_access, |
| 1328 | .kmap_atomic = ion_dma_buf_kmap, |
| 1329 | .kunmap_atomic = ion_dma_buf_kunmap, |
| 1330 | .kmap = ion_dma_buf_kmap, |
| 1331 | .kunmap = ion_dma_buf_kunmap, |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1332 | }; |
| 1333 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1334 | int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle) |
| 1335 | { |
| 1336 | struct ion_buffer *buffer; |
| 1337 | struct dma_buf *dmabuf; |
| 1338 | bool valid_handle; |
| 1339 | int fd; |
| 1340 | |
| 1341 | mutex_lock(&client->lock); |
| 1342 | valid_handle = ion_handle_validate(client, handle); |
| 1343 | mutex_unlock(&client->lock); |
| 1344 | if (!valid_handle) { |
Olav Haugan | 0df5994 | 2012-07-05 14:27:30 -0700 | [diff] [blame] | 1345 | WARN(1, "%s: invalid handle passed to share.\n", __func__); |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1346 | return -EINVAL; |
| 1347 | } |
| 1348 | |
| 1349 | buffer = handle->buffer; |
| 1350 | ion_buffer_get(buffer); |
| 1351 | dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); |
| 1352 | if (IS_ERR(dmabuf)) { |
| 1353 | ion_buffer_put(buffer); |
| 1354 | return PTR_ERR(dmabuf); |
| 1355 | } |
| 1356 | fd = dma_buf_fd(dmabuf, O_CLOEXEC); |
Laura Abbott | c2641f7 | 2012-08-01 18:06:18 -0700 | [diff] [blame] | 1357 | if (fd < 0) |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1358 | dma_buf_put(dmabuf); |
Laura Abbott | c2641f7 | 2012-08-01 18:06:18 -0700 | [diff] [blame] | 1359 | |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1360 | return fd; |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1361 | } |
Olav Haugan | bd453a9 | 2012-07-05 14:21:34 -0700 | [diff] [blame] | 1362 | EXPORT_SYMBOL(ion_share_dma_buf); |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1363 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1364 | struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) |
| 1365 | { |
| 1366 | struct dma_buf *dmabuf; |
| 1367 | struct ion_buffer *buffer; |
| 1368 | struct ion_handle *handle; |
| 1369 | |
| 1370 | dmabuf = dma_buf_get(fd); |
| 1371 | if (IS_ERR_OR_NULL(dmabuf)) |
| 1372 | return dmabuf ? ERR_CAST(dmabuf) : ERR_PTR(-EINVAL);
| 1373 | /* if this memory came from ion */
| 1375 | if (dmabuf->ops != &dma_buf_ops) {
| 1376 | pr_err("%s: can not import dmabuf from another exporter\n", |
| 1377 | __func__); |
| 1378 | dma_buf_put(dmabuf); |
| 1379 | return ERR_PTR(-EINVAL); |
| 1380 | } |
| 1381 | buffer = dmabuf->priv; |
| 1382 | |
| 1383 | mutex_lock(&client->lock); |
| 1384 | /* if a handle exists for this buffer just take a reference to it */ |
| 1385 | handle = ion_handle_lookup(client, buffer); |
| 1386 | if (!IS_ERR_OR_NULL(handle)) { |
| 1387 | ion_handle_get(handle); |
| 1388 | goto end; |
| 1389 | } |
| 1390 | handle = ion_handle_create(client, buffer); |
| 1391 | if (IS_ERR_OR_NULL(handle)) |
| 1392 | goto end; |
| 1393 | ion_handle_add(client, handle); |
| 1394 | end: |
| 1395 | mutex_unlock(&client->lock); |
| 1396 | dma_buf_put(dmabuf); |
| 1397 | return handle; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1398 | } |
Olav Haugan | bd453a9 | 2012-07-05 14:21:34 -0700 | [diff] [blame] | 1399 | EXPORT_SYMBOL(ion_import_dma_buf); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1400 | |
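/*
 * Illustrative sketch of the export/import round trip between two kernel
 * clients ("producer" and "consumer" are assumed names). In practice the
 * fd is handed to another process over a socket or binder transaction.
 *
 *	struct ion_handle *imported;
 *	int fd = ion_share_dma_buf(producer, handle);
 *
 *	if (fd >= 0)
 *		imported = ion_import_dma_buf(consumer, fd);
 */
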
| 1401 | static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
| 1402 | { |
| 1403 | struct ion_client *client = filp->private_data; |
| 1404 | |
| 1405 | switch (cmd) { |
| 1406 | case ION_IOC_ALLOC: |
| 1407 | { |
| 1408 | struct ion_allocation_data data; |
| 1409 | |
| 1410 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) |
| 1411 | return -EFAULT; |
| 1412 | data.handle = ion_alloc(client, data.len, data.align, |
Hanumant Singh | 7d72bad | 2012-08-29 18:39:44 -0700 | [diff] [blame] | 1413 | data.heap_mask, data.flags); |
KyongHo Cho | 9ae7e01 | 2011-09-07 11:27:07 +0900 | [diff] [blame] | 1414 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1415 | if (IS_ERR(data.handle)) |
| 1416 | return PTR_ERR(data.handle); |
KyongHo Cho | 9ae7e01 | 2011-09-07 11:27:07 +0900 | [diff] [blame] | 1417 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1418 | if (copy_to_user((void __user *)arg, &data, sizeof(data))) { |
| 1419 | ion_free(client, data.handle); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1420 | return -EFAULT; |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1421 | } |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1422 | break; |
| 1423 | } |
| 1424 | case ION_IOC_FREE: |
| 1425 | { |
| 1426 | struct ion_handle_data data; |
| 1427 | bool valid; |
| 1428 | |
| 1429 | if (copy_from_user(&data, (void __user *)arg, |
| 1430 | sizeof(struct ion_handle_data))) |
| 1431 | return -EFAULT; |
| 1432 | mutex_lock(&client->lock); |
| 1433 | valid = ion_handle_validate(client, data.handle); |
| 1434 | mutex_unlock(&client->lock); |
| 1435 | if (!valid) |
| 1436 | return -EINVAL; |
| 1437 | ion_free(client, data.handle); |
| 1438 | break; |
| 1439 | } |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1440 | case ION_IOC_MAP: |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1441 | case ION_IOC_SHARE: |
| 1442 | { |
| 1443 | struct ion_fd_data data; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1444 | if (copy_from_user(&data, (void __user *)arg, sizeof(data))) |
| 1445 | return -EFAULT; |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1446 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1447 | data.fd = ion_share_dma_buf(client, data.handle); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1448 | if (copy_to_user((void __user *)arg, &data, sizeof(data))) |
| 1449 | return -EFAULT; |
Olav Haugan | c2d2cf5 | 2012-05-15 14:40:11 -0700 | [diff] [blame] | 1450 | if (data.fd < 0) |
| 1451 | return data.fd; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1452 | break; |
| 1453 | } |
| 1454 | case ION_IOC_IMPORT: |
| 1455 | { |
| 1456 | struct ion_fd_data data; |
Olav Haugan | c2d2cf5 | 2012-05-15 14:40:11 -0700 | [diff] [blame] | 1457 | int ret = 0; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1458 | if (copy_from_user(&data, (void __user *)arg, |
| 1459 | sizeof(struct ion_fd_data))) |
| 1460 | return -EFAULT; |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1461 | data.handle = ion_import_dma_buf(client, data.fd); |
Olav Haugan | 865e97f | 2012-05-15 14:40:11 -0700 | [diff] [blame] | 1462 | if (IS_ERR(data.handle)) { |
| 1463 | ret = PTR_ERR(data.handle); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1464 | data.handle = NULL; |
Olav Haugan | 865e97f | 2012-05-15 14:40:11 -0700 | [diff] [blame] | 1465 | } |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1466 | if (copy_to_user((void __user *)arg, &data, |
| 1467 | sizeof(struct ion_fd_data))) |
| 1468 | return -EFAULT; |
Olav Haugan | c2d2cf5 | 2012-05-15 14:40:11 -0700 | [diff] [blame] | 1469 | if (ret < 0) |
| 1470 | return ret; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1471 | break; |
| 1472 | } |
| 1473 | case ION_IOC_CUSTOM: |
| 1474 | { |
| 1475 | struct ion_device *dev = client->dev; |
| 1476 | struct ion_custom_data data; |
| 1477 | |
| 1478 | if (!dev->custom_ioctl) |
| 1479 | return -ENOTTY; |
| 1480 | if (copy_from_user(&data, (void __user *)arg, |
| 1481 | sizeof(struct ion_custom_data))) |
| 1482 | return -EFAULT; |
| 1483 | return dev->custom_ioctl(client, data.cmd, data.arg); |
| 1484 | } |
Laura Abbott | abcb6f7 | 2011-10-04 16:26:49 -0700 | [diff] [blame] | 1485 | case ION_IOC_CLEAN_CACHES:
| 1486 | case ION_IOC_INV_CACHES:
| 1487 | case ION_IOC_CLEAN_INV_CACHES:
Mitchel Humpherys | d88b8eb | 2012-09-04 17:00:29 -0700 | [diff] [blame] | 1488 | if (!client->dev->custom_ioctl)
| 1489 | return -ENOTTY;
| 1490 | return client->dev->custom_ioctl(client, cmd, arg);
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1494 | default: |
| 1495 | return -ENOTTY; |
| 1496 | } |
| 1497 | return 0; |
| 1498 | } |
| 1499 | |
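/*
 * Illustrative userspace sketch of the ioctl flow above, assuming the uapi
 * definitions from <linux/ion.h>, an "ion_fd" opened on /dev/ion, and a
 * heap mask valid for the board (ION_HEAP_SYSTEM_MASK is an assumption):
 *
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_mask = ION_HEAP_SYSTEM_MASK,
 *		.flags = 0,
 *	};
 *	struct ion_fd_data fd_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *	fd_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
 *
 * fd_data.fd now holds a dma-buf fd that can be passed to another process,
 * which imports it with ION_IOC_IMPORT.
 */
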
| 1500 | static int ion_release(struct inode *inode, struct file *file) |
| 1501 | { |
| 1502 | struct ion_client *client = file->private_data; |
| 1503 | |
| 1504 | pr_debug("%s: %d\n", __func__, __LINE__); |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1505 | ion_client_destroy(client); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1506 | return 0; |
| 1507 | } |
| 1508 | |
| 1509 | static int ion_open(struct inode *inode, struct file *file) |
| 1510 | { |
| 1511 | struct miscdevice *miscdev = file->private_data; |
| 1512 | struct ion_device *dev = container_of(miscdev, struct ion_device, dev); |
| 1513 | struct ion_client *client; |
Laura Abbott | eed8603 | 2011-12-05 15:32:36 -0800 | [diff] [blame] | 1514 | char debug_name[64]; |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1515 | |
| 1516 | pr_debug("%s: %d\n", __func__, __LINE__); |
Laura Abbott | eed8603 | 2011-12-05 15:32:36 -0800 | [diff] [blame] | 1517 | snprintf(debug_name, sizeof(debug_name), "%u", task_pid_nr(current->group_leader));
| 1518 | client = ion_client_create(dev, -1, debug_name); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1519 | if (IS_ERR_OR_NULL(client))
| 1520 | return client ? PTR_ERR(client) : -ENOMEM;
| 1521 | file->private_data = client; |
| 1522 | |
| 1523 | return 0; |
| 1524 | } |
| 1525 | |
| 1526 | static const struct file_operations ion_fops = { |
| 1527 | .owner = THIS_MODULE, |
| 1528 | .open = ion_open, |
| 1529 | .release = ion_release, |
| 1530 | .unlocked_ioctl = ion_ioctl, |
| 1531 | }; |
| 1532 | |
| 1533 | static size_t ion_debug_heap_total(struct ion_client *client, |
Laura Abbott | 3647ac3 | 2011-10-31 14:09:53 -0700 | [diff] [blame] | 1534 | enum ion_heap_ids id) |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1535 | { |
| 1536 | size_t size = 0; |
| 1537 | struct rb_node *n; |
| 1538 | |
| 1539 | mutex_lock(&client->lock); |
| 1540 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { |
| 1541 | struct ion_handle *handle = rb_entry(n, |
| 1542 | struct ion_handle, |
| 1543 | node); |
Laura Abbott | 3647ac3 | 2011-10-31 14:09:53 -0700 | [diff] [blame] | 1544 | if (handle->buffer->heap->id == id) |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1545 | size += handle->buffer->size; |
| 1546 | } |
| 1547 | mutex_unlock(&client->lock); |
| 1548 | return size; |
| 1549 | } |
| 1550 | |
Olav Haugan | 0671b9a | 2012-05-25 11:58:56 -0700 | [diff] [blame] | 1551 | /**
| 1552 | * ion_debug_find_buffer_owner - check whether a client owns a buffer
| 1553 | * @client: candidate owner of the buffer
| 1554 | * @buf: buffer we are trying to find the owner of
| 1555 | *
| 1556 | * Returns 1 if @buf is owned by @client, 0 otherwise. Used for debug output.
| 1557 | */
| 1558 | static int ion_debug_find_buffer_owner(const struct ion_client *client, |
| 1559 | const struct ion_buffer *buf) |
| 1560 | { |
| 1561 | struct rb_node *n; |
| 1562 | |
| 1563 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { |
| 1564 | const struct ion_handle *handle = rb_entry(n, |
| 1565 | const struct ion_handle, |
| 1566 | node); |
| 1567 | if (handle->buffer == buf) |
| 1568 | return 1; |
| 1569 | } |
| 1570 | return 0; |
| 1571 | } |
| 1572 | |
| 1573 | /**
| 1574 | * ion_debug_mem_map_add - add an entry to a mem_map tree
| 1575 | * @mem_map: the mem_map tree
| 1576 | * @data: the new data to add to the tree
| 1577 | *
| 1578 | * Used for debug output.
| | */
| 1579 | static void ion_debug_mem_map_add(struct rb_root *mem_map, |
| 1580 | struct mem_map_data *data) |
| 1581 | { |
| 1582 | struct rb_node **p = &mem_map->rb_node; |
| 1583 | struct rb_node *parent = NULL; |
| 1584 | struct mem_map_data *entry; |
| 1585 | |
| 1586 | while (*p) { |
| 1587 | parent = *p; |
| 1588 | entry = rb_entry(parent, struct mem_map_data, node); |
| 1589 | |
| 1590 | if (data->addr < entry->addr) { |
| 1591 | p = &(*p)->rb_left; |
| 1592 | } else if (data->addr > entry->addr) { |
| 1593 | p = &(*p)->rb_right; |
| 1594 | } else { |
| 1595 | pr_err("%s: mem_map_data already found.\n", __func__);
| 1596 | BUG(); |
| 1597 | } |
| 1598 | } |
| 1599 | rb_link_node(&data->node, parent, p); |
| 1600 | rb_insert_color(&data->node, mem_map); |
| 1601 | } |
| 1602 | |
| 1603 | /**
| 1604 | * ion_debug_locate_owner - find the owner of a buffer
| 1605 | * @dev: ion device containing pointers to all the clients
| 1606 | * @buffer: buffer we are trying to find the owner of
| 1607 | *
| 1608 | * Iterates over all ION clients and returns the owner's name, or NULL.
| | */
| 1609 | const char *ion_debug_locate_owner(const struct ion_device *dev, |
| 1610 | const struct ion_buffer *buffer) |
| 1611 | { |
| 1612 | struct rb_node *j; |
| 1613 | const char *client_name = NULL; |
| 1614 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1615 | for (j = rb_first(&dev->clients); j && !client_name; |
Olav Haugan | 0671b9a | 2012-05-25 11:58:56 -0700 | [diff] [blame] | 1616 | j = rb_next(j)) { |
| 1617 | struct ion_client *client = rb_entry(j, struct ion_client, |
| 1618 | node); |
| 1619 | if (ion_debug_find_buffer_owner(client, buffer)) |
| 1620 | client_name = client->name; |
| 1621 | } |
| 1622 | return client_name; |
| 1623 | } |
| 1624 | |
| 1625 | /**
| 1626 | * ion_debug_mem_map_create - create a mem_map of a heap
| 1627 | * @s: seq_file to log error messages to
| 1628 | * @heap: the heap to create the mem_map for
| 1629 | * @mem_map: the mem_map to be created
| 1630 | */
| 1631 | void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap, |
| 1632 | struct rb_root *mem_map) |
| 1633 | { |
| 1634 | struct ion_device *dev = heap->dev; |
| 1635 | struct rb_node *n; |
Chintan Pandya | daf7562 | 2013-01-29 19:40:01 +0530 | [diff] [blame] | 1636 | size_t size; |
| 1637 | |
| 1638 | if (!heap->ops->phys) |
| 1639 | return; |
Olav Haugan | 0671b9a | 2012-05-25 11:58:56 -0700 | [diff] [blame] | 1640 | |
| 1641 | for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { |
| 1642 | struct ion_buffer *buffer = |
| 1643 | rb_entry(n, struct ion_buffer, node); |
| 1644 | if (buffer->heap->id == heap->id) { |
| 1645 | struct mem_map_data *data = |
| 1646 | kzalloc(sizeof(*data), GFP_KERNEL); |
| 1647 | if (!data) { |
| 1648 | seq_printf(s, "ERROR: out of memory. Part of memory map will not be logged\n");
| 1650 | break; |
| 1651 | } |
Chintan Pandya | daf7562 | 2013-01-29 19:40:01 +0530 | [diff] [blame] | 1652 | |
| 1653 | buffer->heap->ops->phys(buffer->heap, buffer, |
| 1654 | &(data->addr), &size); |
| 1655 | data->size = (unsigned long) size; |
| 1656 | data->addr_end = data->addr + data->size - 1; |
Olav Haugan | 0671b9a | 2012-05-25 11:58:56 -0700 | [diff] [blame] | 1657 | data->client_name = ion_debug_locate_owner(dev, buffer); |
| 1658 | ion_debug_mem_map_add(mem_map, data); |
| 1659 | } |
| 1660 | } |
| 1661 | } |
| 1662 | |
| 1663 | /**
| 1664 | * ion_debug_mem_map_destroy - free a mem_map built by ion_debug_mem_map_create()
| 1665 | * @mem_map: the mem_map to free
| 1666 | */
| 1667 | static void ion_debug_mem_map_destroy(struct rb_root *mem_map) |
| 1668 | { |
| 1669 | if (mem_map) { |
| 1670 | struct rb_node *n; |
| 1671 | while ((n = rb_first(mem_map)) != NULL) {
| 1672 | struct mem_map_data *data = |
| 1673 | rb_entry(n, struct mem_map_data, node); |
| 1674 | rb_erase(&data->node, mem_map); |
| 1675 | kfree(data); |
| 1676 | } |
| 1677 | } |
| 1678 | } |
| 1679 | |
| 1680 | /**
| 1681 | * ion_heap_print_debug - print heap debug information
| 1682 | * @s: seq_file to log messages to
| 1683 | * @heap: the heap to print debug information for
| 1684 | */
| 1685 | static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap) |
| 1686 | { |
| 1687 | if (heap->ops->print_debug) { |
| 1688 | struct rb_root mem_map = RB_ROOT; |
| 1689 | ion_debug_mem_map_create(s, heap, &mem_map); |
| 1690 | heap->ops->print_debug(heap, s, &mem_map); |
| 1691 | ion_debug_mem_map_destroy(&mem_map); |
| 1692 | } |
| 1693 | } |
| 1694 | |
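/*
 * Illustrative sketch of a heap-side print_debug implementation consuming
 * the mem_map built above. The function name is an assumption, and the
 * signature and int return type are inferred from the call in
 * ion_heap_print_debug(); a real heap would also print its own bookkeeping.
 *
 *	static int example_heap_print_debug(struct ion_heap *heap,
 *					    struct seq_file *s,
 *					    struct rb_root *mem_map)
 *	{
 *		struct rb_node *n;
 *
 *		for (n = rb_first(mem_map); n; n = rb_next(n)) {
 *			struct mem_map_data *data =
 *				rb_entry(n, struct mem_map_data, node);
 *			seq_printf(s, "%lx-%lx %s\n", data->addr,
 *				   data->addr_end,
 *				   data->client_name ?: "(unknown)");
 *		}
 *		return 0;
 *	}
 */
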
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1695 | static int ion_debug_heap_show(struct seq_file *s, void *unused) |
| 1696 | { |
| 1697 | struct ion_heap *heap = s->private; |
| 1698 | struct ion_device *dev = heap->dev; |
| 1699 | struct rb_node *n; |
| 1700 | |
Olav Haugan | e4900b5 | 2012-05-25 11:58:03 -0700 | [diff] [blame] | 1701 | mutex_lock(&dev->lock); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1702 | seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin | 043a614 | 2012-02-01 11:09:46 -0800 | [diff] [blame] | 1703 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1704 | for (n = rb_first(&dev->clients); n; n = rb_next(n)) { |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1705 | struct ion_client *client = rb_entry(n, struct ion_client, |
| 1706 | node); |
Laura Abbott | 3647ac3 | 2011-10-31 14:09:53 -0700 | [diff] [blame] | 1707 | size_t size = ion_debug_heap_total(client, heap->id); |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1708 | if (!size) |
| 1709 | continue; |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1710 | if (client->task) { |
| 1711 | char task_comm[TASK_COMM_LEN]; |
| 1712 | |
| 1713 | get_task_comm(task_comm, client->task); |
| 1714 | seq_printf(s, "%16s %16d %16zu\n", task_comm,
| 1715 | client->pid, size);
| 1716 | } else { |
| 1717 | seq_printf(s, "%16s %16d %16zu\n", client->name,
| 1718 | client->pid, size); |
| 1719 | } |
Rebecca Schultz Zavin | c80005a | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1720 | } |
Olav Haugan | 0671b9a | 2012-05-25 11:58:56 -0700 | [diff] [blame] | 1721 | ion_heap_print_debug(s, heap); |
Olav Haugan | e4900b5 | 2012-05-25 11:58:03 -0700 | [diff] [blame] | 1722 | mutex_unlock(&dev->lock); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1723 | return 0; |
| 1724 | } |
| 1725 | |
| 1726 | static int ion_debug_heap_open(struct inode *inode, struct file *file) |
| 1727 | { |
| 1728 | return single_open(file, ion_debug_heap_show, inode->i_private); |
| 1729 | } |
| 1730 | |
| 1731 | static const struct file_operations debug_heap_fops = { |
| 1732 | .open = ion_debug_heap_open, |
| 1733 | .read = seq_read, |
| 1734 | .llseek = seq_lseek, |
| 1735 | .release = single_release, |
| 1736 | }; |
| 1737 | |
| 1738 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) |
| 1739 | { |
| 1740 | struct rb_node **p = &dev->heaps.rb_node; |
| 1741 | struct rb_node *parent = NULL; |
| 1742 | struct ion_heap *entry; |
| 1743 | |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1744 | if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || |
| 1745 | !heap->ops->unmap_dma) |
| 1746 | pr_err("%s: cannot add heap with invalid ops struct.\n",
| 1747 | __func__); |
| 1748 | |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1749 | heap->dev = dev; |
| 1750 | mutex_lock(&dev->lock); |
| 1751 | while (*p) { |
| 1752 | parent = *p; |
| 1753 | entry = rb_entry(parent, struct ion_heap, node); |
| 1754 | |
| 1755 | if (heap->id < entry->id) { |
| 1756 | p = &(*p)->rb_left; |
| 1757 | } else if (heap->id > entry->id) {
| 1758 | p = &(*p)->rb_right; |
| 1759 | } else { |
| 1760 | pr_err("%s: cannot insert multiple heaps with id %d\n",
| 1761 | __func__, heap->id);
| 1762 | goto end; |
| 1763 | } |
| 1764 | } |
| 1765 | |
| 1766 | rb_link_node(&heap->node, parent, p); |
| 1767 | rb_insert_color(&heap->node, &dev->heaps); |
| 1768 | debugfs_create_file(heap->name, 0664, dev->debug_root, heap, |
| 1769 | &debug_heap_fops); |
| 1770 | end: |
| 1771 | mutex_unlock(&dev->lock); |
| 1772 | } |
| 1773 | |
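/*
 * Illustrative sketch (assumed names, and assuming the usual per-heap-type
 * constructor from ion_heap.c): platform code typically creates each heap
 * from its platform data and registers it here during probe:
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */
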
Laura Abbott | 9361930 | 2012-10-11 11:51:40 -0700 | [diff] [blame] | 1774 | int ion_secure_handle(struct ion_client *client, struct ion_handle *handle, |
| 1775 | int version, void *data, int flags) |
| 1776 | { |
| 1777 | int ret = -EINVAL; |
| 1778 | struct ion_heap *heap; |
| 1779 | struct ion_buffer *buffer; |
| 1780 | |
| 1781 | mutex_lock(&client->lock); |
| 1782 | if (!ion_handle_validate(client, handle)) { |
| 1783 | WARN(1, "%s: invalid handle passed to secure.\n", __func__); |
| 1784 | goto out_unlock; |
| 1785 | } |
| 1786 | |
| 1787 | buffer = handle->buffer; |
| 1788 | heap = buffer->heap; |
| 1789 | |
Laura Abbott | 4afbd8b | 2013-02-15 09:21:33 -0800 | [diff] [blame] | 1790 | if (!ion_heap_allow_handle_secure(heap->type)) { |
Laura Abbott | 9361930 | 2012-10-11 11:51:40 -0700 | [diff] [blame] | 1791 | pr_err("%s: cannot secure buffer from a non-secure heap\n",
| 1792 | __func__); |
| 1793 | goto out_unlock; |
| 1794 | } |
| 1795 | |
| 1796 | BUG_ON(!buffer->heap->ops->secure_buffer); |
| 1797 | /* |
| 1798 | * Protect the handle via the client lock to ensure we aren't |
| 1799 | * racing with free |
| 1800 | */ |
| 1801 | ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags); |
| 1802 | |
| 1803 | out_unlock: |
| 1804 | mutex_unlock(&client->lock); |
| 1805 | return ret; |
| 1806 | } |
| 1807 | |
| 1808 | int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle) |
| 1809 | { |
| 1810 | int ret = -EINVAL; |
| 1811 | struct ion_heap *heap; |
| 1812 | struct ion_buffer *buffer; |
| 1813 | |
| 1814 | mutex_lock(&client->lock); |
| 1815 | if (!ion_handle_validate(client, handle)) { |
| 1816 | WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
| 1817 | goto out_unlock; |
| 1818 | } |
| 1819 | |
| 1820 | buffer = handle->buffer; |
| 1821 | heap = buffer->heap; |
| 1822 | |
Laura Abbott | 4afbd8b | 2013-02-15 09:21:33 -0800 | [diff] [blame] | 1823 | if (!ion_heap_allow_handle_secure(heap->type)) { |
Laura Abbott | 9361930 | 2012-10-11 11:51:40 -0700 | [diff] [blame] | 1824 | pr_err("%s: cannot unsecure buffer from a non-secure heap\n",
| 1825 | __func__); |
| 1826 | goto out_unlock; |
| 1827 | } |
| 1828 | |
| 1829 | BUG_ON(!buffer->heap->ops->unsecure_buffer); |
| 1830 | /* |
| 1831 | * Protect the handle via the client lock to ensure we aren't |
| 1832 | * racing with free |
| 1833 | */ |
| 1834 | ret = buffer->heap->ops->unsecure_buffer(buffer, 0); |
| 1835 | |
| 1836 | out_unlock: |
| 1837 | mutex_unlock(&client->lock); |
| 1838 | return ret; |
| 1839 | } |
| 1840 | |
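/*
 * Illustrative sketch: content-protection users bracket device-only access
 * with this pair of calls (the version/data/flags values are specific to
 * the protection implementation and assumed here):
 *
 *	if (!ion_secure_handle(client, handle, version, data, flags)) {
 *		...buffer is accessible to secure hardware only...
 *		ion_unsecure_handle(client, handle);
 *	}
 */
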
Laura Abbott | 7e44648 | 2012-06-13 15:59:39 -0700 | [diff] [blame] | 1841 | int ion_secure_heap(struct ion_device *dev, int heap_id, int version, |
| 1842 | void *data) |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1843 | { |
| 1844 | struct rb_node *n; |
| 1845 | int ret_val = 0; |
| 1846 | |
| 1847 | /* |
| 1848 | * traverse the list of heaps available in this system |
| 1849 | * and find the heap that is specified. |
| 1850 | */ |
| 1851 | mutex_lock(&dev->lock); |
| 1852 | for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { |
| 1853 | struct ion_heap *heap = rb_entry(n, struct ion_heap, node); |
Laura Abbott | 4afbd8b | 2013-02-15 09:21:33 -0800 | [diff] [blame] | 1854 | if (!ion_heap_allow_heap_secure(heap->type)) |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1855 | continue; |
| 1856 | if (ION_HEAP(heap->id) != heap_id) |
| 1857 | continue; |
| 1858 | if (heap->ops->secure_heap) |
Laura Abbott | 7e44648 | 2012-06-13 15:59:39 -0700 | [diff] [blame] | 1859 | ret_val = heap->ops->secure_heap(heap, version, data); |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1860 | else |
| 1861 | ret_val = -EINVAL; |
| 1862 | break; |
| 1863 | } |
| 1864 | mutex_unlock(&dev->lock); |
| 1865 | return ret_val; |
| 1866 | } |
Olav Haugan | bd453a9 | 2012-07-05 14:21:34 -0700 | [diff] [blame] | 1867 | EXPORT_SYMBOL(ion_secure_heap); |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1868 | |
Laura Abbott | 7e44648 | 2012-06-13 15:59:39 -0700 | [diff] [blame] | 1869 | int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version, |
| 1870 | void *data) |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1871 | { |
| 1872 | struct rb_node *n; |
| 1873 | int ret_val = 0; |
| 1874 | |
| 1875 | /* |
| 1876 | * traverse the list of heaps available in this system |
| 1877 | * and find the heap that is specified. |
| 1878 | */ |
| 1879 | mutex_lock(&dev->lock); |
| 1880 | for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { |
| 1881 | struct ion_heap *heap = rb_entry(n, struct ion_heap, node); |
Laura Abbott | 4afbd8b | 2013-02-15 09:21:33 -0800 | [diff] [blame] | 1882 | if (!ion_heap_allow_heap_secure(heap->type)) |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1883 | continue; |
| 1884 | if (ION_HEAP(heap->id) != heap_id) |
| 1885 | continue; |
| 1886 | if (heap->ops->unsecure_heap)
Laura Abbott | 7e44648 | 2012-06-13 15:59:39 -0700 | [diff] [blame] | 1887 | ret_val = heap->ops->unsecure_heap(heap, version, data); |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1888 | else |
| 1889 | ret_val = -EINVAL; |
| 1890 | break; |
| 1891 | } |
| 1892 | mutex_unlock(&dev->lock); |
| 1893 | return ret_val; |
| 1894 | } |
Olav Haugan | bd453a9 | 2012-07-05 14:21:34 -0700 | [diff] [blame] | 1895 | EXPORT_SYMBOL(ion_unsecure_heap); |
Olav Haugan | 0a85251 | 2012-01-09 10:20:55 -0800 | [diff] [blame] | 1896 | |
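/*
 * Illustrative sketch: the heap-wide variants are driven by platform code
 * with a heap id mask rather than a handle ("heap_id", "version" and
 * "data" are assumptions):
 *
 *	if (!ion_secure_heap(idev, heap_id, version, data)) {
 *		...all buffers in the heap are protected...
 *		ion_unsecure_heap(idev, heap_id, version, data);
 *	}
 */
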
Laura Abbott | 404f824 | 2011-10-31 14:22:53 -0700 | [diff] [blame] | 1897 | static int ion_debug_leak_show(struct seq_file *s, void *unused) |
| 1898 | { |
| 1899 | struct ion_device *dev = s->private; |
| 1900 | struct rb_node *n; |
Laura Abbott | 404f824 | 2011-10-31 14:22:53 -0700 | [diff] [blame] | 1901 | |
Laura Abbott | 404f824 | 2011-10-31 14:22:53 -0700 | [diff] [blame] | 1902 | seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
| 1903 | "ref cnt"); |
Mitchel Humpherys | a75e4eb | 2012-12-14 16:12:23 -0800 | [diff] [blame] | 1904 | |
Laura Abbott | 404f824 | 2011-10-31 14:22:53 -0700 | [diff] [blame] | 1905 | mutex_lock(&dev->lock); |
Mitchel Humpherys | a75e4eb | 2012-12-14 16:12:23 -0800 | [diff] [blame] | 1906 | ion_mark_dangling_buffers_locked(dev); |
Laura Abbott | 404f824 | 2011-10-31 14:22:53 -0700 | [diff] [blame] | 1907 | |
Mitchel Humpherys | a75e4eb | 2012-12-14 16:12:23 -0800 | [diff] [blame] | 1908 | /* Anyone still marked as a 1 means a leaked handle somewhere */ |
Laura Abbott | 404f824 | 2011-10-31 14:22:53 -0700 | [diff] [blame] | 1909 | for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { |
| 1910 | struct ion_buffer *buf = rb_entry(n, struct ion_buffer, |
| 1911 | node); |
| 1912 | |
| 1913 | if (buf->marked == 1) |
| 1914 | seq_printf(s, "%16p %16s %16zu %16d\n",
| 1915 | buf, buf->heap->name, buf->size,
| 1916 | atomic_read(&buf->ref.refcount));
| 1917 | } |
| 1918 | mutex_unlock(&dev->lock); |
| 1919 | return 0; |
| 1920 | } |
| 1921 | |
| 1922 | static int ion_debug_leak_open(struct inode *inode, struct file *file) |
| 1923 | { |
| 1924 | return single_open(file, ion_debug_leak_show, inode->i_private); |
| 1925 | } |
| 1926 | |
| 1927 | static const struct file_operations debug_leak_fops = { |
| 1928 | .open = ion_debug_leak_open, |
| 1929 | .read = seq_read, |
| 1930 | .llseek = seq_lseek, |
| 1931 | .release = single_release, |
| 1932 | }; |
| 1933 | |
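/*
 * The leak checker registered above is read from userspace, e.g.:
 *
 *	# cat /sys/kernel/debug/ion/check_leaked_fds
 *
 * (assuming debugfs is mounted at /sys/kernel/debug; the "ion" directory
 * and the file itself are created in ion_device_create() below).
 */
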
| 1934 | |
| 1935 | |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1936 | struct ion_device *ion_device_create(long (*custom_ioctl) |
| 1937 | (struct ion_client *client, |
| 1938 | unsigned int cmd, |
| 1939 | unsigned long arg)) |
| 1940 | { |
| 1941 | struct ion_device *idev; |
| 1942 | int ret; |
| 1943 | |
| 1944 | idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); |
| 1945 | if (!idev) |
| 1946 | return ERR_PTR(-ENOMEM); |
| 1947 | |
| 1948 | idev->dev.minor = MISC_DYNAMIC_MINOR; |
| 1949 | idev->dev.name = "ion"; |
| 1950 | idev->dev.fops = &ion_fops; |
| 1951 | idev->dev.parent = NULL; |
| 1952 | ret = misc_register(&idev->dev); |
| 1953 | if (ret) { |
| 1954 | pr_err("ion: failed to register misc device.\n");
| 1955 | kfree(idev);
| | return ERR_PTR(ret);
| 1956 | } |
| 1957 | |
| 1958 | idev->debug_root = debugfs_create_dir("ion", NULL); |
| 1959 | if (IS_ERR_OR_NULL(idev->debug_root)) |
| 1960 | pr_err("ion: failed to create debug files.\n"); |
| 1961 | |
| 1962 | idev->custom_ioctl = custom_ioctl; |
| 1963 | idev->buffers = RB_ROOT; |
| 1964 | mutex_init(&idev->lock); |
| 1965 | idev->heaps = RB_ROOT; |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1966 | idev->clients = RB_ROOT; |
Laura Abbott | 404f824 | 2011-10-31 14:22:53 -0700 | [diff] [blame] | 1967 | debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev, |
| 1968 | &debug_leak_fops); |
Mitchel Humpherys | a75e4eb | 2012-12-14 16:12:23 -0800 | [diff] [blame] | 1969 | |
| 1970 | setup_ion_leak_check(idev->debug_root); |
Rebecca Schultz Zavin | 0c38bfd | 2011-06-29 19:44:29 -0700 | [diff] [blame] | 1971 | return idev; |
| 1972 | } |
| 1973 | |
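/*
 * Illustrative sketch (assumed names): a platform driver probe pairs
 * ion_device_create() with heap registration, passing its own custom
 * ioctl handler:
 *
 *	idev = ion_device_create(my_custom_ioctl);
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 *	for (i = 0; i < pdata->nr; i++)
 *		ion_device_add_heap(idev, heaps[i]);
 */
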
| 1974 | void ion_device_destroy(struct ion_device *dev) |
| 1975 | { |
| 1976 | misc_deregister(&dev->dev); |
| 1977 | /* XXX need to free the heaps and clients ? */ |
| 1978 | kfree(dev); |
| 1979 | } |
Laura Abbott | b14ed96 | 2012-01-30 14:18:08 -0800 | [diff] [blame] | 1980 | |
| 1981 | void __init ion_reserve(struct ion_platform_data *data) |
| 1982 | { |
| 1983 | int i, ret; |
| 1984 | |
| 1985 | for (i = 0; i < data->nr; i++) { |
| 1986 | if (data->heaps[i].size == 0) |
| 1987 | continue; |
| 1988 | ret = memblock_reserve(data->heaps[i].base, |
| 1989 | data->heaps[i].size); |
| 1990 | if (ret) |
| 1991 | pr_err("memblock reserve of %zx@%lx failed\n",
| 1992 | data->heaps[i].size, |
| 1993 | data->heaps[i].base); |
| 1994 | } |
| 1995 | } |
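
/*
 * Illustrative sketch (assumed names): a board file calls ion_reserve()
 * from its machine .reserve hook, before the page allocator claims the
 * memory:
 *
 *	static void __init example_board_reserve(void)
 *	{
 *		ion_reserve(&example_ion_pdata);
 *	}
 */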