/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately, and to
 * not confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager
 * should only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is
 * highly optimized for alloc/free calls, not lookups. Hence, we use an
 * rb-tree to speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given
 * offset, please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles
 * access management. For every open-file context that is allowed to access a
 * given node, you must call drm_vma_node_allow(). Otherwise, an mmap() call
 * on this open-file with the offset of the node will fail with -EACCES. To
 * revoke access again, use drm_vma_node_revoke(). However, the caller is
 * responsible for destroying already existing mappings, if required.
 */
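
/*
 * Illustrative lifecycle sketch (not part of the original file): a driver
 * embeds one manager per device and one node per mappable object. All names
 * below (my_dev, my_obj, MY_PAGE_OFFSET, MY_PAGE_SIZE) are hypothetical
 * placeholders, not existing DRM symbols.
 *
 *	drm_vma_offset_manager_init(&my_dev->vma_mgr,
 *				    MY_PAGE_OFFSET, MY_PAGE_SIZE);
 *
 *	ret = drm_vma_offset_add(&my_dev->vma_mgr, &my_obj->vma_node,
 *				 my_obj->size >> PAGE_SHIFT);
 *	...
 *	drm_vma_offset_remove(&my_dev->vma_mgr, &my_obj->vma_node);
 *	drm_vma_offset_manager_destroy(&my_dev->vma_mgr);
 */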

/**
 * drm_vma_offset_manager_init() - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that the vma offset manager lookup lock must be acquired with
 * drm_vma_offset_lock_lookup() before calling this function. See there for an
 * example. This can then be used to implement weakly referenced lookups using
 * kref_get_unless_zero().
 *
 * Example:
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *	if (node)
 *		kref_get_unless_zero(container_of(node, sth, entr));
 *	drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
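
/*
 * Illustrative sketch (not from the original source): a driver fault/mmap
 * path might combine the locked lookup with weak reference acquisition as
 * described above. The my_obj type and its ref/vma_node members are
 * hypothetical placeholders.
 *
 *	struct drm_vma_offset_node *node;
 *	struct my_obj *obj = NULL;
 *
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, vma->vm_pgoff,
 *					    vma_pages(vma));
 *	if (node) {
 *		obj = container_of(node, struct my_obj, vma_node);
 *		if (!kref_get_unless_zero(&obj->ref))
 *			obj = NULL;
 *	}
 *	drm_vma_offset_unlock_lookup(mgr);
 */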

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or call
 * drm_vma_offset_remove() anyway. However, no cleanup is required in that
 * case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
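
/*
 * Illustrative sketch (not from the original source): a driver typically
 * allocates the offset once per object and hands the byte-based address to
 * user-space as the mmap() offset. my_dev, my_obj and args are hypothetical
 * placeholders; drm_vma_node_offset_addr() converts the page-based offset
 * into bytes.
 *
 *	ret = drm_vma_offset_add(&my_dev->vma_mgr, &my_obj->vma_node,
 *				 my_obj->size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *
 *	args->mmap_offset = drm_vma_node_offset_addr(&my_obj->vma_node);
 */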

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
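
/*
 * Illustrative teardown sketch (not from the original source): on object
 * destruction, remove the offset before freeing the object so no new
 * mappings can be set up against a stale offset. my_dev and my_obj are
 * hypothetical placeholders.
 *
 *	drm_vma_offset_remove(&my_dev->vma_mgr, &my_obj->vma_node);
 *	kfree(my_obj);
 */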

/**
 * drm_vma_node_allow() - Add open-file to list of allowed users
 * @node: Node to modify
 * @filp: Open file to add
 *
 * Add @filp to the list of allowed open-files for this node. If @filp is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (filp == entry->vm_filp) {
			entry->vm_count++;
			goto unlock;
		} else if (filp > entry->vm_filp) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_filp = filp;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);
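
/*
 * Illustrative sketch (not from the original source): drivers typically grant
 * access when an object handle is created for an open-file and drop it again
 * when the handle is closed, pairing every allow with exactly one revoke.
 * my_obj and file_priv are hypothetical placeholders.
 *
 *	ret = drm_vma_node_allow(&my_obj->vma_node, file_priv->filp);
 *	if (ret)
 *		return ret;
 *	...
 *	drm_vma_node_revoke(&my_obj->vma_node, file_priv->filp);
 */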

/**
 * drm_vma_node_revoke() - Remove open-file from list of allowed users
 * @node: Node to modify
 * @filp: Open file to remove
 *
 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @filp from the list. You must call
 * this once for every drm_vma_node_allow() on @filp.
 *
 * This is locked against concurrent access internally.
 *
 * If @filp is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (filp > entry->vm_filp) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed() - Check whether an open-file is granted access
 * @node: Node to check
 * @filp: Open-file to check for
 *
 * Search the list in @node to check whether @filp is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true iff @filp is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp)
			break;
		else if (filp > entry->vm_filp)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
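
/*
 * Illustrative sketch (not from the original source): an mmap() handler can
 * use the access check to reject unauthorized callers before setting up the
 * mapping, matching the -EACCES behavior described in the DOC section above.
 * my_driver_setup_mapping() is a hypothetical placeholder.
 *
 *	if (!drm_vma_node_is_allowed(node, filp))
 *		return -EACCES;
 *
 *	return my_driver_setup_mapping(node, vma);
 */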