/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		rb tree of all the heaps in the system
 * @custom_ioctl:	arch-specific ioctl hook, may be NULL
 * @user_clients:	rb tree of all the clients created from userspace
 * @kernel_clients:	rb tree of all the clients created by kernel drivers
 * @debug_root:		root dentry for this device's debugfs entries
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @ref:		for reference counting the client
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client's thread group leader, for debugging
 * @debug_root:		this client's debugfs entry
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 * @usermap_cnt:	count of times this client has mapped for userspace
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}
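
/*
 * Usage sketch (not part of the driver): an in-kernel client allocating
 * and releasing a buffer.  ION_HEAP_SYSTEM_MASK is an assumed platform
 * heap-id mask -- heap ids are assigned by the board file, so the real
 * mask is platform specific.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_4K, SZ_4K, ION_HEAP_SYSTEM_MASK);
 *	if (IS_ERR_OR_NULL(handle))
 *		return -ENOMEM;
 *	...
 *	ion_free(client, handle);
 */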

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}

static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);

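/*
 * Mapping counts are kept at two levels: each handle counts how many times
 * its client has mapped the buffer (*handle_cnt), and the buffer counts how
 * many handles currently have it mapped (*buffer_cnt).  _ion_map returns
 * true only on the transition that requires the heap to create a real
 * mapping; _ion_unmap returns true on the transition that allows the heap
 * to tear it down.  Worked example with a single client and handle:
 *
 *	map:	handle_cnt 0 -> 1, buffer_cnt 0 -> 1, returns true  (map)
 *	map:	handle_cnt 1 -> 2, buffer_cnt stays 1, returns false (reuse)
 *	unmap:	handle_cnt 2 -> 1, returns false
 *	unmap:	handle_cnt 1 -> 0, buffer_cnt 1 -> 0, returns true  (unmap)
 */
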
static bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	if (*buffer_cnt)
		map = false;
	else
		map = true;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
		     unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
			       " cannot map with flags %lx\n", __func__,
			       buffer->flags, flags);
			vaddr = ERR_PTR(-EEXIST);
			goto out;
		}

	} else {
		buffer->flags = flags;
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
						      flags);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
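
/*
 * Kernel-mapping sketch (illustrative only): pair every successful
 * ion_map_kernel() with an ion_unmap_kernel().  ION_SET_CACHE(CACHED) is
 * the caching flag used elsewhere in this file (see ion_share_mmap below);
 * buffer_len stands for the allocation size the caller already knows.
 *
 *	void *vaddr = ion_map_kernel(client, handle, ION_SET_CACHE(CACHED));
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return -ENOMEM;
 *	memset(vaddr, 0, buffer_len);
 *	ion_unmap_kernel(client, handle);
 */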

struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
			       " cannot map with flags %lx\n", __func__,
			       buffer->flags, flags);
			sglist = ERR_PTR(-EEXIST);
			goto out;
		}

	} else {
		buffer->flags = flags;
	}

	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sglist = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

struct ion_buffer *ion_share(struct ion_client *client,
			     struct ion_handle *handle)
{
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* do not take an extra reference here, the burden is on the caller
	 * to make sure the buffer doesn't go away while it's passing it
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}
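
/*
 * Sharing sketch between two in-kernel clients.  Per the comment in
 * ion_share(), client_a must keep handle_a alive (no ion_free) until
 * ion_import() has taken its own reference for client_b:
 *
 *	struct ion_buffer *buffer = ion_share(client_a, handle_a);
 *	struct ion_handle *handle_b = ion_import(client_b, buffer);
 */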

static int check_vaddr_bounds(unsigned long start, unsigned long end)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	int ret = 1;

	if (end < start)
		goto out;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			goto out_up;
		if (end > vma->vm_end)
			goto out_up;
		ret = 0;
	}

out_up:
	up_read(&mm->mmap_sem);
out:
	return ret;
}

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
		    void *uaddr, unsigned long offset, unsigned long len,
		    unsigned int cmd)
{
	struct ion_buffer *buffer;
	unsigned long start, end;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	/* uncached buffers need no cache maintenance */
	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	start = (unsigned long) uaddr;
	end = (unsigned long) uaddr + len;

	if (check_vaddr_bounds(start, end)) {
		pr_err("%s: virtual address %p is out of bounds\n",
		       __func__, uaddr);
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
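
/*
 * Cache-maintenance sketch: uaddr/offset/len describe the region of the
 * caller's mapping to operate on, and cmd is one of the ION_IOC_*_CACHES
 * numbers dispatched by ion_ioctl() below.
 *
 *	ret = ion_do_cache_op(client, handle, uaddr, 0, len,
 *			      ION_IOC_CLEAN_CACHES);
 */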

static const struct file_operations ion_share_fops;

struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("%s: imported fd not found in file table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("%s: imported file is not a shared ion file.\n",
		       __func__);
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu %d\n", names[i], sizes[i],
			   atomic_read(&client->ref.refcount));
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}
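
/*
 * A kernel driver would typically create one client at probe time and keep
 * it for its lifetime.  idev here stands for the ion_device created by the
 * board file; a heap_mask of -1 accepts every heap type, as ion_open()
 * below does for userspace clients:
 *
 *	struct ion_client *client = ion_client_create(idev, -1, "mydriver");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 */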

static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

static void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}

static int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}

void ion_client_destroy(struct ion_client *client)
{
	ion_client_put(client);
}

static int ion_share_release(struct inode *inode, struct file *file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&buffer->lock);
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;
	unsigned long flags = file->f_flags & O_DSYNC ?
				ION_SET_CACHE(UNCACHED) :
				ION_SET_CACHE(CACHED);

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* make sure the client still exists, it's possible for the client to
	   have gone away but the map/share fd still to be around, take
	   a reference to it so it can't go away while this mapping exists */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: trying to mmap an ion handle in a process with no "
		       "ion client\n", __func__);
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				     buffer->size)) {
		pr_err("%s: trying to map larger area than handle has available"
		       "\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);
	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
			       " cannot map with flags %lx\n", __func__,
			       buffer->flags, flags);
			ret = -EEXIST;
			mutex_unlock(&buffer->lock);
			goto err1;
		}

	} else {
		buffer->flags = flags;
	}
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
					  flags);
	buffer->umap_cnt++;
	mutex_unlock(&buffer->lock);
	if (ret) {
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
		goto err2;
	}

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err2:
	buffer->umap_cnt--;
	/* drop the reference to the handle */
err1:
	ion_handle_put(handle);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner		= THIS_MODULE,
	.release	= ion_share_release,
	.mmap		= ion_share_mmap,
};

static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return -ENFILE;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;

	if (parent->f_flags & O_DSYNC)
		file->f_flags |= O_DSYNC;

	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			pr_err("%s: invalid handle passed to share ioctl.\n",
			       __func__);
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	{
		struct ion_flush_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_flush_data)))
			return -EFAULT;

		return ion_do_cache_op(client, data.handle, data.vaddr,
				       data.offset, data.length, cmd);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
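
/*
 * Userspace sketch of the ioctl ABI above (error handling elided, and
 * ION_HEAP_SYSTEM_MASK again stands in for a platform heap mask):
 *
 *	int fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096, .flags = ION_HEAP_SYSTEM_MASK,
 *	};
 *	ioctl(fd, ION_IOC_ALLOC, &alloc);
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(fd, ION_IOC_SHARE, &share);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       share.fd, 0);
 */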

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_put(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16.16s %16.16s %16.16s\n", "client", "pid", "size");
	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		char task_comm[TASK_COMM_LEN];
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;

		get_task_comm(task_comm, client->task);
		seq_printf(s, "%16.16s %16u %16zu\n", task_comm, client->pid,
			   size);
	}

	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		seq_printf(s, "%16.16s %16u %16zu\n", client->name, client->pid,
			   size);
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
			       "id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->user_clients = RB_ROOT;
	idev->kernel_clients = RB_ROOT;
	return idev;
}
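
/*
 * Board-file sketch: create the device once, then register each heap.
 * my_heap is hypothetical here -- heaps are constructed by the heap
 * implementations (see ion_priv.h), not by this file:
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	ion_device_add_heap(idev, my_heap);
 */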

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}