/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by mm->mmap_sem */
	struct rb_root		objects;
};

struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};
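
/*
 * Structure overview (added for clarity): one struct amdgpu_mn exists per
 * (device, mm_struct) pair and is looked up through adev->mn_hash. Its
 * interval tree holds one amdgpu_mn_node per disjoint address range; BOs
 * whose userptr ranges overlap are chained on the same node's bos list,
 * so a single invalidation can walk every affected BO.
 */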

/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	down_write(&rmn->mm->mmap_sem);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
		interval_tree_remove(&node->it, &rmn->objects);
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block until all BOs between start and end are idle, then unmap
 * them by moving them back into the system domain.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;
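	/*
	 * Worked example (illustrative): an invalidation of the range
	 * [0x10000, 0x12000) reported by the notifier becomes a tree query
	 * for [0x10000, 0x11fff], since interval_tree_iter_first() treats
	 * both bounds as inclusive.
	 */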

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;
		long r;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {
			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
							  end))
				continue;

			r = amdgpu_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			amdgpu_bo_unreserve(bo);
		}
	}
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;

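	/*
	 * Lock-order note (added for clarity): adev->mn_lock is always taken
	 * before mm->mmap_sem, here as well as in amdgpu_mn_destroy() and
	 * amdgpu_mn_unregister(), so these paths cannot deadlock against
	 * each other.
	 */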
	mutex_lock(&adev->mn_lock);
	down_write(&mm->mmap_sem);

	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);

	return rmn;

free_rmn:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr address we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	down_write(&rmn->mm->mmap_sem);

	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}
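	/*
	 * Merge example (illustrative): if the tree holds nodes covering
	 * [0x1000, 0x1fff] and [0x3000, 0x3fff] and a BO is registered at
	 * [0x1800, 0x37ff], the loop above removes both nodes, widens the
	 * range to [0x1000, 0x3fff] and collects all their BOs, so a single
	 * merged node is reinserted below.
	 */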

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			up_write(&rmn->mm->mmap_sem);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	up_write(&rmn->mm->mmap_sem);

	return 0;
}
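
/*
 * Usage sketch (illustrative only; the real callers live elsewhere in the
 * driver, e.g. the userptr ioctl path): a caller that has set up a userptr
 * BO would pair the two entry points roughly like this, where "args->addr"
 * stands for the user virtual address backing the BO:
 *
 *	r = amdgpu_mn_register(bo, args->addr);
 *	if (r)
 *		goto error_free;
 *	...
 *	amdgpu_mn_unregister(bo);	// on teardown or in error paths
 */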

/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);

	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	down_write(&rmn->mm->mmap_sem);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

	if (list_empty(head)) {
		struct amdgpu_mn_node *node;
		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
}