/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by mm->mmap_sem */
	struct rb_root		objects;
};

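/*
 * One node covers one disjoint interval of userptr address space;
 * registrations with overlapping ranges are merged into a single node
 * and share its list of BOs.
 */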
struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

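	/* take mmap_sem before mn_lock; every path in this file uses the
	 * same lock ordering */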
	down_write(&rmn->mm->mmap_sem);
	mutex_lock(&adev->mn_lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {

		interval_tree_remove(&node->it, &rmn->objects);
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&adev->mn_lock);
	up_write(&rmn->mm->mmap_sem);
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
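	/* we cannot unregister the notifier from inside its own callback,
	 * so defer the actual teardown to a work item */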
	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * Block for all BOs between start and end to become idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;
		long r;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
							  end))
				continue;

			r = amdgpu_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

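			/* wait for all pending fences so the GPU is done
			 * with the pages before they are unmapped */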
			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

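			/* moving the BO into the CPU domain unbinds it from
			 * GTT, so the GPU stops accessing the user pages */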
			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			amdgpu_bo_unreserve(bo);
		}
	}
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;

	down_write(&mm->mmap_sem);
	mutex_lock(&adev->mn_lock);

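	/* notifiers are per (device, mm) pair; reuse an existing one
	 * whenever possible */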
	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	rmn->objects = RB_ROOT;

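	/* use the __ variant since we already hold mmap_sem for writing */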
	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&adev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&adev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	down_write(&rmn->mm->mmap_sem);

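	/* merge all nodes whose interval overlaps [addr, end] into one,
	 * widening the range to cover every BO collected on the way */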
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			up_write(&rmn->mm->mmap_sem);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	up_write(&rmn->mm->mmap_sem);

	return 0;
}

/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn = bo->mn;
	struct list_head *head;

	if (rmn == NULL)
		return;

	down_write(&rmn->mm->mmap_sem);
	mutex_lock(&adev->mn_lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

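	/* if the saved list head is now empty, this BO was the node's last
	 * entry, so drop the interval node as well */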
	if (list_empty(head)) {
		struct amdgpu_mn_node *node;
		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&adev->mn_lock);
	up_write(&rmn->mm->mmap_sem);
}