/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;
};

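/*
 * One node of the interval tree: covers a single userptr address range
 * and collects all BOs whose ranges were merged into it.
 */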
struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @node: the node with the BOs to unmap
 * @start: start of the affected address range
 * @end: end of the affected address range
 *
 * We block for all BOs and unmap them by moving them
 * into the system domain again.
 */
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
				      unsigned long start,
				      unsigned long end)
{
	struct amdgpu_bo *bo;
	long r;

	list_for_each_entry(bo, &node->bos, mn_list) {

		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
			continue;

		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
			continue;
		}

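		/* wait for all pending GPU work on the BO to finish
		 * before unmapping it
		 */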
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
			true, false, MAX_SCHEDULE_TIMEOUT);
		if (r <= 0)
			DRM_ERROR("(%ld) failed to wait for user bo\n", r);

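		/* moving the BO back into the CPU domain drops its GTT
		 * binding and with it the mapping of the user pages
		 */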
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (r)
			DRM_ERROR("(%ld) failed to validate user bo\n", r);

		amdgpu_bo_unreserve(bo);
	}
}

/**
 * amdgpu_mn_invalidate_page - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @address: address of the invalidated page
 *
 * Invalidation of a single page. Blocks for all BOs mapping it
 * and unmaps them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long address)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	mutex_lock(&rmn->lock);

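	/* intervals in the tree are inclusive, so querying
	 * [address, address] finds a node containing that address
	 */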
	it = interval_tree_iter_first(&rmn->objects, address, address);
	if (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		amdgpu_mn_invalidate_node(node, address, address);
	}

	mutex_unlock(&rmn->lock);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to become idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		amdgpu_mn_invalidate_node(node, start, end);
	}

	mutex_unlock(&rmn->lock);
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_page = amdgpu_mn_invalidate_page,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm, or returns the
 * already existing one.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;

	mutex_lock(&adev->mn_lock);
	down_write(&mm->mmap_sem);

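	/* reuse an existing notifier context for this mm if there is one */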
	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);

	return rmn;

free_rmn:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

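	/* merge all overlapping address ranges into a single node,
	 * collecting their BOs
	 */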
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

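	/* reuse the last node removed above or allocate a fresh one */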
	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}
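
/*
 * Typical usage, as an illustrative sketch rather than a verbatim caller
 * (args->addr stands in for the user supplied address): the GEM userptr
 * path would register the BO once the user address is known and
 * unregister it again on destruction:
 *
 *	r = amdgpu_mn_register(bo, args->addr);
 *	if (r)
 *		goto release_object;
 *	...
 *	amdgpu_mn_unregister(bo);
 */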

/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);

	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

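	/* if that was the last BO of the node, remove the node as well */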
	if (list_empty(head)) {
		struct amdgpu_mn_node *node;
		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
}