#ifndef __DRM_VMA_MANAGER_H__
#define __DRM_VMA_MANAGER_H__

/*
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_mm.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* We make up offsets for buffer objects so we can recognize them at
 * mmap time. pgoff in mmap is an unsigned long, so we need to make sure
 * that the faked-up offset will fit.
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 256)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

struct drm_file;

struct drm_vma_offset_file {
	struct rb_node vm_rb;
	struct drm_file *vm_tag;
	unsigned long vm_count;
};

struct drm_vma_offset_node {
	rwlock_t vm_lock;
	struct drm_mm_node vm_node;
	struct rb_root vm_files;
	bool readonly:1;
};

struct drm_vma_offset_manager {
	rwlock_t vm_lock;
	struct drm_mm vm_addr_space_mm;
};

void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size);
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
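
/*
 * Example (illustrative sketch, not part of the original header): a driver
 * typically embeds a struct drm_vma_offset_manager in its device structure
 * and initializes it with the offset range defined above. struct my_device,
 * my_driver_load() and my_driver_unload() are hypothetical names used only
 * for illustration.
 *
 *	struct my_device {
 *		struct drm_vma_offset_manager vma_manager;
 *	};
 *
 *	static void my_driver_load(struct my_device *mdev)
 *	{
 *		drm_vma_offset_manager_init(&mdev->vma_manager,
 *					    DRM_FILE_PAGE_OFFSET_START,
 *					    DRM_FILE_PAGE_OFFSET_SIZE);
 *	}
 *
 *	static void my_driver_unload(struct my_device *mdev)
 *	{
 *		drm_vma_offset_manager_destroy(&mdev->vma_manager);
 *	}
 */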

struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							  unsigned long start,
							  unsigned long pages);
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages);
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node);
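
/*
 * Example (illustrative sketch, not part of the original header): allocating
 * and releasing an mmap offset for a buffer object. struct my_bo, my_bo_init()
 * and my_bo_free() are hypothetical driver types and helpers; only the
 * drm_vma_*() calls reflect this API.
 *
 *	struct my_bo {
 *		struct drm_vma_offset_node vma_node;
 *		unsigned long num_pages;
 *	};
 *
 *	static int my_bo_init(struct my_device *mdev, struct my_bo *bo)
 *	{
 *		drm_vma_node_reset(&bo->vma_node);
 *		return drm_vma_offset_add(&mdev->vma_manager, &bo->vma_node,
 *					  bo->num_pages);
 *	}
 *
 *	static void my_bo_free(struct my_device *mdev, struct my_bo *bo)
 *	{
 *		drm_vma_offset_remove(&mdev->vma_manager, &bo->vma_node);
 *	}
 */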

int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag);
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag);
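
/*
 * Example (illustrative sketch, not part of the original header): granting a
 * DRM file access to a buffer object when a handle is created and revoking it
 * again when the handle is closed. my_bo_handle_create() and
 * my_bo_handle_close() are hypothetical helpers.
 *
 *	static int my_bo_handle_create(struct my_bo *bo, struct drm_file *file)
 *	{
 *		return drm_vma_node_allow(&bo->vma_node, file);
 *	}
 *
 *	static void my_bo_handle_close(struct my_bo *bo, struct drm_file *file)
 *	{
 *		drm_vma_node_revoke(&bo->vma_node, file);
 *	}
 *
 * A driver's mmap path can then use drm_vma_node_is_allowed() to reject
 * mappings from files that never obtained a handle to the object.
 */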

/**
 * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
 * @mgr: Manager object
 * @start: Start address (page-based, not byte-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup_locked() but does not allow any offset into
 * the node. It only returns the exact object with the given start address.
 *
 * RETURNS:
 * Node at exact start address @start.
 */
static inline struct drm_vma_offset_node *
drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
				   unsigned long start,
				   unsigned long pages)
{
	struct drm_vma_offset_node *node;

	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	return (node && node->vm_node.start == start) ? node : NULL;
}

/**
 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
 * @mgr: Manager object
 *
 * Lock VMA manager for extended lookups. Only locked VMA function calls
 * are allowed while holding this lock. All other contexts are blocked from any
 * VMA manager access until the lock is released via
 * drm_vma_offset_unlock_lookup().
 *
 * Use this if you need to take a reference to the objects returned by
 * drm_vma_offset_lookup_locked() before releasing this lock again.
 *
 * This lock must not be used for anything else than extended lookups. You must
 * not call any other VMA helpers while holding this lock.
 *
 * Note: You're in atomic context while holding this lock!
 */
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{
	read_lock(&mgr->vm_lock);
}

/**
 * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
 * @mgr: Manager object
 *
 * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
 */
static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
{
	read_unlock(&mgr->vm_lock);
}
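
/*
 * Example (illustrative sketch, not part of the original header): the extended
 * lookup pattern described above: take the lookup lock, resolve the fault
 * offset to a node, grab a reference while still locked, then drop the lock.
 * drm_vma_node_to_my_bo() and my_bo_get() are hypothetical driver helpers; a
 * GEM-based driver would typically take a reference on the embedding object.
 *
 *	struct drm_vma_offset_node *node;
 *	struct my_bo *bo = NULL;
 *
 *	drm_vma_offset_lock_lookup(&mdev->vma_manager);
 *	node = drm_vma_offset_exact_lookup_locked(&mdev->vma_manager,
 *						  vma->vm_pgoff,
 *						  vma_pages(vma));
 *	if (node) {
 *		bo = drm_vma_node_to_my_bo(node);
 *		my_bo_get(bo);
 *	}
 *	drm_vma_offset_unlock_lookup(&mdev->vma_manager);
 *
 *	if (!bo)
 *		return -EINVAL;
 */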

/**
 * drm_vma_node_reset() - Initialize or reset node object
 * @node: Node to initialize or reset
 *
 * Reset a node to its initial state. This must be called before using it with
 * any VMA offset manager.
 *
 * This must not be called on an already allocated node, or you will leak
 * memory.
 */
static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
{
	memset(node, 0, sizeof(*node));
	node->vm_files = RB_ROOT;
	rwlock_init(&node->vm_lock);
}

/**
 * drm_vma_node_start() - Return start address for page-based addressing
 * @node: Node to inspect
 *
 * Return the start address of the given node. This can be used as offset into
 * the linear VM space that is provided by the VMA offset manager. Note that
 * this can only be used for page-based addressing. If you need a proper offset
 * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
 * drm_vma_node_offset_addr() helper instead.
 *
 * RETURNS:
 * Start address of @node for page-based addressing. 0 if the node does not
 * have an offset allocated.
 */
static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node)
{
	return node->vm_node.start;
}

/**
 * drm_vma_node_size() - Return size (page-based)
 * @node: Node to inspect
 *
 * Return the size as number of pages for the given node. This is the same size
 * that was passed to drm_vma_offset_add(). If no offset is allocated for the
 * node, this is 0.
 *
 * RETURNS:
 * Size of @node as number of pages. 0 if the node does not have an offset
 * allocated.
 */
static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
{
	return node->vm_node.size;
}

/**
 * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
 * @node: Linked offset node
 *
 * Same as drm_vma_node_start() but returns the address as a valid offset that
 * can be used for user-space mappings during mmap().
 * This must not be called on unlinked nodes.
 *
 * RETURNS:
 * Offset of @node for byte-based addressing. 0 if the node does not have an
 * offset allocated.
 */
static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
{
	return ((__u64)node->vm_node.start) << PAGE_SHIFT;
}
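
/*
 * Example (illustrative sketch, not part of the original header): a driver's
 * "get mmap offset" ioctl typically hands this value back to user-space, which
 * then passes it as the offset argument to mmap(). args->offset, drm_fd and
 * size are hypothetical names used only for illustration.
 *
 *	args->offset = drm_vma_node_offset_addr(&bo->vma_node);
 *
 * User-space then maps the object with:
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, args->offset);
 */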

/**
 * drm_vma_node_unmap() - Unmap offset node
 * @node: Offset node
 * @file_mapping: Address space to unmap @node from
 *
 * Unmap all userspace mappings for a given offset node. The mappings must be
 * associated with the @file_mapping address-space. If no offset exists,
 * nothing is done.
 *
 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
 * is not called on this node concurrently.
 */
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
				      struct address_space *file_mapping)
{
	if (drm_mm_node_allocated(&node->vm_node))
		unmap_mapping_range(file_mapping,
				    drm_vma_node_offset_addr(node),
				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
}
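
/*
 * Example (illustrative sketch, not part of the original header): tearing down
 * existing CPU mappings when a buffer's backing storage is purged or moved, so
 * that the next user-space access faults and can be handled again. The mapping
 * argument is the struct address_space the object's mmap offsets were
 * installed in; which one that is depends on the driver.
 *
 *	drm_vma_node_unmap(&bo->vma_node, mapping);
 */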

/**
 * drm_vma_node_verify_access() - Access verification helper for TTM
 * @node: Offset node
 * @tag: Tag of file to check
 *
 * This checks whether @tag is granted access to @node. It is the same as
 * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
 * verify_access() callbacks.
 *
 * RETURNS:
 * 0 if access is granted, -EACCES otherwise.
 */
static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
					     struct drm_file *tag)
{
	return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
}
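
/*
 * Example (illustrative sketch, not part of the original header): a TTM-based
 * driver's verify_access() callback can forward to this helper, using the
 * struct drm_file stored in the file's private_data as the tag.
 * my_bo_from_ttm() is a hypothetical conversion helper, and the exact callback
 * signature depends on the TTM version in use.
 *
 *	static int my_bo_verify_access(struct ttm_buffer_object *tbo,
 *				       struct file *filp)
 *	{
 *		struct my_bo *bo = my_bo_from_ttm(tbo);
 *
 *		return drm_vma_node_verify_access(&bo->vma_node,
 *						  filp->private_data);
 *	}
 */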

#endif /* __DRM_VMA_MANAGER_H__ */