#ifndef __DRM_VMA_MANAGER_H__
#define __DRM_VMA_MANAGER_H__

/*
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_mm.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* We make up offsets for buffer objects so we can recognize them at
 * mmap time. pgoff in mmap is an unsigned long, so we need to make sure
 * that the faked up offset will fit
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 256)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
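
/*
 * A minimal sketch of how a driver (or the DRM core) might seed its offset
 * manager with this range (illustrative only; "mgr" is a driver-owned
 * manager instance, not something defined here):
 *
 *   drm_vma_offset_manager_init(mgr, DRM_FILE_PAGE_OFFSET_START,
 *                               DRM_FILE_PAGE_OFFSET_SIZE);
 *
 * Nodes added to such a manager then get page offsets above the first 4 GiB
 * of byte offsets, so faked-up buffer offsets do not collide with legacy
 * DRM map offsets.
 */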

struct drm_file;

struct drm_vma_offset_file {
	struct rb_node vm_rb;
	struct drm_file *vm_tag;
	unsigned long vm_count;
};

struct drm_vma_offset_node {
	rwlock_t vm_lock;
	struct drm_mm_node vm_node;
	struct rb_root vm_files;
	bool readonly:1;
};

struct drm_vma_offset_manager {
	rwlock_t vm_lock;
	struct drm_mm vm_addr_space_mm;
};

void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size);
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);

struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							  unsigned long start,
							  unsigned long pages);
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages);
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node);

int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag);
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag);

/**
 * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
 * @mgr: Manager object
 * @start: Start address (page-based, not byte-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
 * It only returns the exact object with the given start address.
 *
 * RETURNS:
 * Node at exact start address @start.
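 *
 * Example (a minimal sketch of an mmap path; "vma" is the vm_area_struct
 * being mapped and "mgr" the driver's offset manager, both caller-provided
 * and not part of this API):
 *
 *   drm_vma_offset_lock_lookup(mgr);
 *   node = drm_vma_offset_exact_lookup_locked(mgr, vma->vm_pgoff,
 *                                             vma_pages(vma));
 *   drm_vma_offset_unlock_lookup(mgr);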
 */
static inline struct drm_vma_offset_node *
drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
				   unsigned long start,
				   unsigned long pages)
{
	struct drm_vma_offset_node *node;

	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	return (node && node->vm_node.start == start) ? node : NULL;
}

/**
 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
 * @mgr: Manager object
 *
 * Lock VMA manager for extended lookups. Only locked VMA function calls
 * are allowed while holding this lock. All other contexts are blocked from
 * the VMA manager until the lock is released via
 * drm_vma_offset_unlock_lookup().
 *
 * Use this if you need to take a reference to the objects returned by
 * drm_vma_offset_lookup_locked() before releasing this lock again.
 *
 * This lock must not be used for anything other than extended lookups. You
 * must not call any other VMA helpers while holding this lock.
 *
 * Note: You're in atomic-context while holding this lock!
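 *
 * Example (an illustrative sketch of taking a reference under the lock;
 * "struct my_obj", its "vma_node" member and "ref" kref are hypothetical
 * driver-side names, not part of this API):
 *
 *   drm_vma_offset_lock_lookup(mgr);
 *   node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *   if (node) {
 *       obj = container_of(node, struct my_obj, vma_node);
 *       if (!kref_get_unless_zero(&obj->ref))
 *           obj = NULL;
 *   }
 *   drm_vma_offset_unlock_lookup(mgr);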
 */
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{
	read_lock(&mgr->vm_lock);
}

/**
 * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
 * @mgr: Manager object
 *
 * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
 */
static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
{
	read_unlock(&mgr->vm_lock);
}

/**
 * drm_vma_node_reset() - Initialize or reset node object
 * @node: Node to initialize or reset
 *
 * Reset a node to its initial state. This must be called before using it with
 * any VMA offset manager.
 *
 * This must not be called on an already allocated node, or you will leak
 * memory.
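 *
 * Example (a rough sketch of the usual node lifecycle; "obj", "vma_node" and
 * "npages" are hypothetical driver-side names, error handling omitted):
 *
 *   drm_vma_node_reset(&obj->vma_node);                   // at object init
 *   drm_vma_offset_add(mgr, &obj->vma_node, obj->npages); // create offset
 *   ...                                                   // userspace mmaps it
 *   drm_vma_offset_remove(mgr, &obj->vma_node);           // at object free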
 */
static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
{
	memset(node, 0, sizeof(*node));
	node->vm_files = RB_ROOT;
	rwlock_init(&node->vm_lock);
}

/**
 * drm_vma_node_start() - Return start address for page-based addressing
 * @node: Node to inspect
 *
 * Return the start address of the given node. This can be used as offset into
 * the linear VM space that is provided by the VMA offset manager. Note that
 * this can only be used for page-based addressing. If you need a proper offset
 * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
 * drm_vma_node_offset_addr() helper instead.
 *
 * RETURNS:
 * Start address of @node for page-based addressing. 0 if the node does not
 * have an offset allocated.
 */
static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node)
{
	return node->vm_node.start;
}

/**
 * drm_vma_node_size() - Return size (page-based)
 * @node: Node to inspect
 *
 * Return the size as number of pages for the given node. This is the same size
 * that was passed to drm_vma_offset_add(). If no offset is allocated for the
 * node, this is 0.
 *
 * RETURNS:
 * Size of @node as number of pages. 0 if the node does not have an offset
 * allocated.
 */
static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
{
	return node->vm_node.size;
}

/**
 * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
 * @node: Linked offset node
 *
 * Same as drm_vma_node_start() but returns the address as a valid offset that
 * can be used for user-space mappings during mmap().
 * This must not be called on unlinked nodes.
 *
 * RETURNS:
 * Offset of @node for byte-based addressing. 0 if the node does not have an
 * offset allocated.
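 *
 * Example (a sketch of reporting the mmap offset to userspace; "args" is a
 * hypothetical ioctl argument struct, not part of this API):
 *
 *   args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 *   // userspace then calls mmap(NULL, size, prot, flags, fd, args->offset)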
 */
static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
{
	return ((__u64)node->vm_node.start) << PAGE_SHIFT;
}

/**
 * drm_vma_node_unmap() - Unmap offset node
 * @node: Offset node
 * @file_mapping: Address space to unmap @node from
 *
 * Unmap all userspace mappings for a given offset node. The mappings must be
 * associated with the @file_mapping address-space. If no offset exists,
 * nothing is done.
 *
 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
 * is not called on this node concurrently.
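 *
 * Example (a sketch of zapping CPU mappings when a buffer is evicted or
 * purged; which address_space to pass depends on how the driver set up
 * mmap, drm_device::anon_inode->i_mapping being the common choice for GEM):
 *
 *   drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);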
 */
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
				      struct address_space *file_mapping)
{
	if (drm_mm_node_allocated(&node->vm_node))
		unmap_mapping_range(file_mapping,
				    drm_vma_node_offset_addr(node),
				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
}

/**
 * drm_vma_node_verify_access() - Access verification helper for TTM
 * @node: Offset node
 * @tag: Tag of file to check
 *
 * This checks whether @tag is granted access to @node. It is the same as
 * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
 * verify_access() callbacks.
 *
 * RETURNS:
 * 0 if access is granted, -EACCES otherwise.
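 *
 * Example (a sketch of a TTM driver's verify_access() callback; "my_bo", its
 * "tbo" and embedded GEM object are hypothetical driver-side types):
 *
 *   static int my_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 *   {
 *       struct my_bo *mbo = container_of(bo, struct my_bo, tbo);
 *
 *       return drm_vma_node_verify_access(&mbo->gem.vma_node,
 *                                         filp->private_data);
 *   }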
 */
static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
					     struct drm_file *tag)
{
	return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
}

#endif /* __DRM_VMA_MANAGER_H__ */