/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_device *dev;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct list_head linear;
	unsigned long serial;
	bool has_linear;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct interval_tree_node it;
	struct list_head link;
	struct drm_i915_gem_object *obj;
	bool active;
	bool is_linear;
};

static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	unsigned long end;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	end = obj->userptr.ptr + obj->base.size;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return end;
}

static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end)
{
	struct i915_mmu_object *mo;
	unsigned long serial;

restart:
	serial = mn->serial;
	list_for_each_entry(mo, &mn->linear, link) {
		struct drm_i915_gem_object *obj;

		if (mo->it.last < start || mo->it.start > end)
			continue;

		obj = mo->obj;

		if (!mo->active ||
		    !kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_unlock(&mn->lock);

		cancel_userptr(obj);

		spin_lock(&mn->lock);
		if (serial != mn->serial)
			goto restart;
	}

	return NULL;
}

static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it = NULL;
	unsigned long next = start;
	unsigned long serial = 0;

	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
	while (next < end) {
		struct drm_i915_gem_object *obj = NULL;

		spin_lock(&mn->lock);
		if (mn->has_linear)
			it = invalidate_range__linear(mn, mm, start, end);
		else if (serial == mn->serial)
			it = interval_tree_iter_next(it, next, end);
		else
			it = interval_tree_iter_first(&mn->objects, start, end);
		if (it != NULL) {
			struct i915_mmu_object *mo =
				container_of(it, struct i915_mmu_object, it);

			/* The mmu_object is released late when destroying the
			 * GEM object so it is entirely possible to gain a
			 * reference on an object in the process of being freed
			 * since our serialisation is via the spinlock and not
			 * the struct_mutex - and consequently use it after it
			 * is freed and then double free it.
			 */
			if (mo->active &&
			    kref_get_unless_zero(&mo->obj->base.refcount))
				obj = mo->obj;

			serial = mn->serial;
		}
		spin_unlock(&mn->lock);
		if (obj == NULL)
			return;

		next = cancel_userptr(obj);
	}
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->serial = 1;
	INIT_LIST_HEAD(&mn->linear);
	mn->has_linear = false;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
	if (++mn->serial == 0)
		mn->serial = 1;
}

static int
i915_mmu_notifier_add(struct drm_device *dev,
		      struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	struct interval_tree_node *it;
	int ret = 0;

	/* By this point we have already done a lot of expensive setup that
	 * we do not want to repeat just because the caller (e.g. X) has a
	 * signal pending (and partly because of that expensive setup, X
	 * using an interrupt timer is likely to get stuck in an EINTR loop).
	 */
	mutex_lock(&dev->struct_mutex);

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(dev);

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects,
				      mo->it.start, mo->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return back to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 *
		 * If we do have an overlap, we cannot use the interval tree
		 * for fast range invalidation.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!obj->userptr.workers)
			mn->has_linear = mo->is_linear = true;
		else
			ret = -EAGAIN;
	} else
		interval_tree_insert(&mo->it, &mn->objects);

	if (ret == 0) {
		list_add(&mo->link, &mn->linear);
		__i915_mmu_notifier_update_serial(mn);
	}
	spin_unlock(&mn->lock);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
	struct i915_mmu_object *mo;

	list_for_each_entry(mo, &mn->linear, link)
		if (mo->is_linear)
			return true;

	return false;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	spin_lock(&mn->lock);
	list_del(&mo->link);
	if (mo->is_linear)
		mn->has_linear = i915_mmu_notifier_has_linear(mn);
	else
		interval_tree_remove(&mo->it, &mn->objects);
	__i915_mmu_notifier_update_serial(mn);
	spin_unlock(&mn->lock);
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	i915_mmu_notifier_del(mo->mn, mo);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&to_i915(mm->dev)->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&to_i915(mm->dev)->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = mo->it.start + obj->base.size - 1;
	mo->obj = obj;

	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
	if (ret) {
		kfree(mo);
		return ret;
	}

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->dev = obj->base.dev;

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&to_i915(mm->dev)->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active()	swiotlb_nr_tbl()
#else
#define swiotlb_active()	0
#endif

static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	obj->userptr.mmu_object->active = value;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(npages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		down_read(&mm->mmap_sem);
		while (pinned < npages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     npages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
		if (ret)
			__i915_gem_userptr_set_active(obj, false);
	}

	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = obj;
	drm_gem_object_reference(&obj->base);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}
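
/*
 * Illustrative note, not part of the driver's interface: the -EAGAIN
 * returned above is expected to surface to userspace from whichever
 * ioctl required the backing pages (typically execbuffer), and userspace
 * simply repeats that ioctl until the worker has finished, along the
 * lines of
 *
 *	do {
 *		err = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *	} while (err == -1 && (errno == EAGAIN || errno == EINTR));
 *
 * where fd and execbuf stand for whatever the caller already has at hand.
 */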

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */
	if (IS_ERR(obj->userptr.work)) {
		/* active flag will have been dropped already by the worker */
		ret = PTR_ERR(obj->userptr.work);
		obj->userptr.work = NULL;
		return ret;
	}
	if (obj->userptr.work)
		/* active flag should still be held for the pending work */
		return -EAGAIN;

	/* Let the mmu-notifier know that we have begun and need cancellation */
	__i915_gem_userptr_set_active(obj, true);

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL) {
				__i915_gem_userptr_set_active(obj, false);
				return -ENOMEM;
			}
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
856int
857i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
858{
Chris Wilson5cc9ed42014-05-16 14:22:37 +0100859 struct drm_i915_gem_userptr *args = data;
860 struct drm_i915_gem_object *obj;
861 int ret;
862 u32 handle;
863
864 if (args->flags & ~(I915_USERPTR_READ_ONLY |
865 I915_USERPTR_UNSYNCHRONIZED))
866 return -EINVAL;
867
868 if (offset_in_page(args->user_ptr | args->user_size))
869 return -EINVAL;
870
Chris Wilson5cc9ed42014-05-16 14:22:37 +0100871 if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
872 (char __user *)(unsigned long)args->user_ptr, args->user_size))
873 return -EFAULT;
874
875 if (args->flags & I915_USERPTR_READ_ONLY) {
876 /* On almost all of the current hw, we cannot tell the GPU that a
877 * page is readonly, so this is just a placeholder in the uAPI.
878 */
879 return -ENODEV;
880 }
881
Chris Wilson5cc9ed42014-05-16 14:22:37 +0100882 obj = i915_gem_object_alloc(dev);
883 if (obj == NULL)
884 return -ENOMEM;
885
886 drm_gem_private_object_init(dev, &obj->base, args->user_size);
887 i915_gem_object_init(obj, &i915_gem_userptr_ops);
888 obj->cache_level = I915_CACHE_LLC;
889 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
890 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
891
892 obj->userptr.ptr = args->user_ptr;
893 obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
894
895 /* And keep a pointer to the current->mm for resolving the user pages
896 * at binding. This means that we need to hook into the mmu_notifier
897 * in order to detect if the mmu is destroyed.
898 */
Chris Wilsonad46cb52014-08-07 14:20:40 +0100899 ret = i915_gem_userptr_init__mm_struct(obj);
900 if (ret == 0)
Chris Wilson5cc9ed42014-05-16 14:22:37 +0100901 ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
902 if (ret == 0)
903 ret = drm_gem_handle_create(file, &obj->base, &handle);
904
905 /* drop reference from allocate - handle holds it now */
906 drm_gem_object_unreference_unlocked(&obj->base);
907 if (ret)
908 return ret;
909
910 args->handle = handle;
911 return 0;
912}
913
int
i915_gem_init_userptr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
	return 0;
}