/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"

struct radeon_mn {
	/* constant after initialisation */
	struct radeon_device *rdev;
	struct mm_struct *mm;
	struct mmu_notifier mn;

	/* only used on destruction */
	struct work_struct work;

	/* protected by rdev->mn_lock */
	struct hlist_node node;

	/* objects protected by lock */
	struct mutex lock;
	struct rb_root objects;
};

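/*
 * Lock ordering, as taken by the code below: mmap_sem before
 * rdev->mn_lock (radeon_mn_get()), and rdev->mn_lock before rmn->lock
 * (radeon_mn_destroy() and radeon_mn_unregister()).
 */
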
/**
 * radeon_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void radeon_mn_destroy(struct work_struct *work)
{
	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
	struct radeon_device *rdev = rmn->rdev;
	struct radeon_bo *bo, *next;

	mutex_lock(&rdev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) {
		interval_tree_remove(&bo->mn_it, &rmn->objects);
		bo->mn = NULL;
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
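	/*
	 * Unregister outside of the locks; mmu_notifier_unregister()
	 * waits for any callbacks still running to finish first.
	 */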
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * radeon_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void radeon_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
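
	/*
	 * Unregistering the notifier from inside its own callback would
	 * deadlock, so tear it down from a work item instead.
	 */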
	INIT_WORK(&rmn->work, radeon_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct radeon_bo *bo;
		int r;

		bo = container_of(it, struct radeon_bo, mn_it);
		it = interval_tree_iter_next(it, start, end);

		r = radeon_bo_reserve(bo, true);
		if (r) {
			DRM_ERROR("(%d) failed to reserve user bo\n", r);
			continue;
		}

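		/* wait for any GPU work still using the BO to finish */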
		if (bo->tbo.sync_obj) {
			r = radeon_fence_wait(bo->tbo.sync_obj, false);
			if (r)
				DRM_ERROR("(%d) failed to wait for user bo\n", r);
		}

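		/*
		 * Moving the BO into the CPU domain unbinds it from GTT
		 * and thus drops the GPU mapping of the user pages.
		 */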
		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (r)
			DRM_ERROR("(%d) failed to validate user bo\n", r);

		radeon_bo_unreserve(bo);
	}

	mutex_unlock(&rmn->lock);
}

static const struct mmu_notifier_ops radeon_mn_ops = {
	.release = radeon_mn_release,
	.invalidate_range_start = radeon_mn_invalidate_range_start,
};

/**
 * radeon_mn_get - create notifier context
 *
 * @rdev: radeon device pointer
 *
 * Creates a notifier context for current->mm, or returns the existing
 * one for this mm. Returns an ERR_PTR on failure.
 */
static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
{
	struct mm_struct *mm = current->mm;
	struct radeon_mn *rmn;
	int r;

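	/* mmap_sem must be held for write to call __mmu_notifier_register() */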
	down_write(&mm->mmap_sem);
	mutex_lock(&rdev->mn_lock);

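	/* reuse an existing notifier context for this mm, if we have one */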
	hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->rdev = rdev;
	rmn->mm = mm;
	rmn->mn.ops = &radeon_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr address we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	unsigned long end = addr + radeon_bo_size(bo) - 1;
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct interval_tree_node *it;

	rmn = radeon_mn_get(rdev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	mutex_lock(&rmn->lock);

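	/* reject the range if it overlaps an already registered BO */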
	it = interval_tree_iter_first(&rmn->objects, addr, end);
	if (it) {
		mutex_unlock(&rmn->lock);
		return -EEXIST;
	}

	bo->mn = rmn;
	bo->mn_it.start = addr;
	bo->mn_it.last = end;
	interval_tree_insert(&bo->mn_it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}

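/*
 * Typical usage, sketched after the userptr path (the exact caller and
 * argument names live in radeon_gem.c and may differ):
 *
 *	r = radeon_mn_register(bo, args->addr);
 *	if (r)
 *		goto release_object;
 *
 * The notifier then evicts the BO whenever the monitored range is
 * invalidated; radeon_mn_unregister() drops the registration again
 * when the BO is destroyed.
 */
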
/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;

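	/* rdev->mn_lock keeps bo->mn stable against radeon_mn_destroy() */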
	mutex_lock(&rdev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&rdev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);
	interval_tree_remove(&bo->mn_it, &rmn->objects);
	bo->mn = NULL;
	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
}