/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

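/*
 * Per-process bookkeeping for userptr objects: one i915_mm_struct wraps the
 * mm_struct shared by all userptr objects created from that process. It is
 * kref-counted, hashed by mm pointer under dev_priv->mm_lock, and lazily
 * grows an i915_mmu_notifier on first use. The final teardown (notifier
 * unregister and mmdrop) is pushed to a worker, see
 * __i915_mm_struct_free__worker(), so that it never runs under struct_mutex.
 */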
struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_device *dev;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
        struct list_head linear;
        unsigned long serial;
        bool has_linear;
};

struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct interval_tree_node it;
        struct list_head link;
        struct drm_i915_gem_object *obj;
        bool is_linear;
};

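/*
 * Drop the pages backing a userptr object: cancel any pending gup worker,
 * unbind every VMA (uninterruptibly, as we are running from the mmu
 * notifier) and release the pages. Returns the end of the object's user
 * range so the invalidate walker can continue past it.
 */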
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        unsigned long end;

        mutex_lock(&dev->struct_mutex);
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;

        if (obj->pages != NULL) {
                struct drm_i915_private *dev_priv = to_i915(dev);
                struct i915_vma *vma, *tmp;
                bool was_interruptible;

                was_interruptible = dev_priv->mm.interruptible;
                dev_priv->mm.interruptible = false;

                list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
                        int ret = i915_vma_unbind(vma);
                        WARN_ON(ret && ret != -EIO);
                }
                WARN_ON(i915_gem_object_put_pages(obj));

                dev_priv->mm.interruptible = was_interruptible;
        }

        end = obj->userptr.ptr + obj->base.size;

        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        return end;
}

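/*
 * Fallback for overlapping userptr ranges that cannot live in the interval
 * tree: walk the flat list of objects, cancelling every one that intersects
 * [start, end]. The spinlock is dropped around cancel_userptr(), so if the
 * notifier's serial changes underneath us the walk is restarted.
 */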
static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
                                      struct mm_struct *mm,
                                      unsigned long start,
                                      unsigned long end)
{
        struct i915_mmu_object *mo;
        unsigned long serial;

restart:
        serial = mn->serial;
        list_for_each_entry(mo, &mn->linear, link) {
                struct drm_i915_gem_object *obj;

                if (mo->it.last < start || mo->it.start > end)
                        continue;

                obj = mo->obj;
                drm_gem_object_reference(&obj->base);
                spin_unlock(&mn->lock);

                cancel_userptr(obj);

                spin_lock(&mn->lock);
                if (serial != mn->serial)
                        goto restart;
        }

        return NULL;
}

static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
        struct interval_tree_node *it = NULL;
        unsigned long next = start;
        unsigned long serial = 0;

        end--; /* interval ranges are inclusive, but invalidate range is exclusive */
        while (next < end) {
                struct drm_i915_gem_object *obj = NULL;

                spin_lock(&mn->lock);
                if (mn->has_linear)
                        it = invalidate_range__linear(mn, mm, start, end);
                else if (serial == mn->serial)
                        it = interval_tree_iter_next(it, next, end);
                else
                        it = interval_tree_iter_first(&mn->objects, start, end);
                if (it != NULL) {
                        obj = container_of(it, struct i915_mmu_object, it)->obj;
                        drm_gem_object_reference(&obj->base);
                        serial = mn->serial;
                }
                spin_unlock(&mn->lock);
                if (obj == NULL)
                        return;

                next = cancel_userptr(obj);
        }
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int ret;

        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
        mn->serial = 1;
        INIT_LIST_HEAD(&mn->linear);
        mn->has_linear = false;

        /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
                kfree(mn);
                return ERR_PTR(ret);
        }

        return mn;
}

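/*
 * Bump the notifier's serial, skipping 0 on wrap. Walkers snapshot the
 * serial while holding mn->lock; a mismatch after re-acquiring the lock
 * tells them the object list or tree changed while the lock was dropped.
 */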
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
        if (++mn->serial == 0)
                mn->serial = 1;
}

static int
i915_mmu_notifier_add(struct drm_device *dev,
                      struct i915_mmu_notifier *mn,
                      struct i915_mmu_object *mo)
{
        struct interval_tree_node *it;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        /* Make sure we drop the final active reference (and thereby
         * remove the objects from the interval tree) before we do
         * the check for overlapping objects.
         */
        i915_gem_retire_requests(dev);

        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects,
                                      mo->it.start, mo->it.last);
        if (it) {
                struct drm_i915_gem_object *obj;

                /* We only need to check the first object in the range as it
                 * either has cancelled gup work queued and we need to
                 * return back to the user to give time for the gup-workers
                 * to flush their object references upon which the object will
                 * be removed from the interval-tree, or the range is
                 * still in use by another client and the overlap is invalid.
                 *
                 * If we do have an overlap, we cannot use the interval tree
                 * for fast range invalidation.
                 */

                obj = container_of(it, struct i915_mmu_object, it)->obj;
                if (!obj->userptr.workers)
                        mn->has_linear = mo->is_linear = true;
                else
                        ret = -EAGAIN;
        } else
                interval_tree_insert(&mo->it, &mn->objects);

        if (ret == 0) {
                list_add(&mo->link, &mn->linear);
                __i915_mmu_notifier_update_serial(mn);
        }
        spin_unlock(&mn->lock);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
        struct i915_mmu_object *mo;

        list_for_each_entry(mo, &mn->linear, link)
                if (mo->is_linear)
                        return true;

        return false;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
                      struct i915_mmu_object *mo)
{
        spin_lock(&mn->lock);
        list_del(&mo->link);
        if (mo->is_linear)
                mn->has_linear = i915_mmu_notifier_has_linear(mn);
        else
                interval_tree_remove(&mo->it, &mn->objects);
        __i915_mmu_notifier_update_serial(mn);
        spin_unlock(&mn->lock);
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        i915_mmu_notifier_del(mo->mn, mo);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

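/*
 * Find (or lazily create) the mmu notifier for this i915_mm_struct. The
 * creation path takes mmap_sem for write before dev_priv->mm_lock, as
 * required for __mmu_notifier_register(), keeping a stable lock order
 * against the gup worker which takes mmap_sem for read.
 */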
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        struct i915_mmu_notifier *mn = mm->mn;

        if (mn)
                return mn;

        down_write(&mm->mm->mmap_sem);
        mutex_lock(&to_i915(mm->dev)->mm_lock);
        if ((mn = mm->mn) == NULL) {
                mn = i915_mmu_notifier_create(mm->mm);
                if (!IS_ERR(mn))
                        mm->mn = mn;
        }
        mutex_unlock(&to_i915(mm->dev)->mm_lock);
        up_write(&mm->mm->mmap_sem);

        return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;
        int ret;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = mo->it.start + obj->base.size - 1;
        mo->obj = obj;

        ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
        if (ret) {
                kfree(mo);
                return ret;
        }

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->dev = obj->base.dev;

                mm->mm = current->mm;
                atomic_inc(&current->mm->mm_count);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
        mmdrop(mm->mm);
        kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&to_i915(mm->dev)->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}

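/*
 * State handed to the deferred get_user_pages() worker: the work item
 * itself, the object whose backing store is being populated and the task
 * whose mm the pages are pinned from. A reference is held on both the
 * object and the task for the lifetime of the work.
 */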
struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

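/*
 * Build the sg_table for the pinned pages. When swiotlb may be bouncing the
 * DMA we keep one page per scatterlist entry; otherwise we let
 * sg_alloc_table_from_pages() coalesce physically contiguous pages into
 * larger segments.
 */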
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
        struct scatterlist *sg;
        int ret, n;

        *st = kmalloc(sizeof(**st), GFP_KERNEL);
        if (*st == NULL)
                return -ENOMEM;

        if (swiotlb_active()) {
                ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
                if (ret)
                        goto err;

                for_each_sg((*st)->sgl, sg, num_pages, n)
                        sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
        } else {
                ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
                                                0, num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (ret)
                        goto err;
        }

        return 0;

err:
        kfree(*st);
        *st = NULL;
        return ret;
}

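/*
 * Slow path: pin the user pages from a workqueue so that mmap_sem is only
 * taken without struct_mutex held. Once everything is pinned we retake
 * struct_mutex and either publish the pages or, if the work has been
 * cancelled or superseded (obj->userptr.work no longer points at us),
 * quietly drop them again.
 */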
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        struct drm_device *dev = obj->base.dev;
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = kmalloc(num_pages*sizeof(struct page *),
                       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (pvec == NULL)
                pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;

                down_read(&mm->mmap_sem);
                while (pinned < num_pages) {
                        ret = get_user_pages(work->task, mm,
                                             obj->userptr.ptr + pinned * PAGE_SIZE,
                                             num_pages - pinned,
                                             !obj->userptr.read_only, 0,
                                             pvec + pinned, NULL);
                        if (ret < 0)
                                break;

                        pinned += ret;
                }
                up_read(&mm->mmap_sem);
        }

        mutex_lock(&dev->struct_mutex);
        if (obj->userptr.work != &work->work) {
                ret = 0;
        } else if (pinned == num_pages) {
                ret = st_set_pages(&obj->pages, pvec, num_pages);
                if (ret == 0) {
                        list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
                        pinned = 0;
                }
        }

        obj->userptr.work = ERR_PTR(ret);
        obj->userptr.workers--;
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);

        put_task_struct(work->task);
        kfree(work);
}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */

        pvec = NULL;
        pinned = 0;
        if (obj->userptr.mm->mm == current->mm) {
                pvec = kmalloc(num_pages*sizeof(struct page *),
                               GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
                if (pvec == NULL) {
                        pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
                        if (pvec == NULL)
                                return -ENOMEM;
                }

                pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
                                               !obj->userptr.read_only, pvec);
        }
        if (pinned < num_pages) {
                if (pinned < 0) {
                        ret = pinned;
                        pinned = 0;
                } else {
                        /* Spawn a worker so that we can acquire the
                         * user pages without holding our mutex. Access
                         * to the user pages requires mmap_sem, and we have
                         * a strict lock ordering of mmap_sem, struct_mutex -
                         * we already hold struct_mutex here and so cannot
                         * call gup without encountering a lock inversion.
                         *
                         * Userspace will keep on repeating the operation
                         * (thanks to EAGAIN) until either we hit the fast
                         * path or the worker completes. If the worker is
                         * cancelled or superseded, the task is still run
                         * but the results ignored. (This leads to
                         * complications that we may have a stray object
                         * refcount that we need to be wary of when
                         * checking for existing objects during creation.)
                         * If the worker encounters an error, it reports
                         * that error back to this function through
                         * obj->userptr.work = ERR_PTR.
                         */
                        ret = -EAGAIN;
                        if (obj->userptr.work == NULL &&
                            obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
                                struct get_pages_work *work;

                                work = kmalloc(sizeof(*work), GFP_KERNEL);
                                if (work != NULL) {
                                        obj->userptr.work = &work->work;
                                        obj->userptr.workers++;

                                        work->obj = obj;
                                        drm_gem_object_reference(&obj->base);

                                        work->task = current;
                                        get_task_struct(work->task);

                                        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
                                        schedule_work(&work->work);
                                } else
                                        ret = -ENOMEM;
                        } else {
                                if (IS_ERR(obj->userptr.work)) {
                                        ret = PTR_ERR(obj->userptr.work);
                                        obj->userptr.work = NULL;
                                }
                        }
                }
        } else {
                ret = st_set_pages(&obj->pages, pvec, num_pages);
                if (ret == 0) {
                        obj->userptr.work = NULL;
                        pinned = 0;
                }
        }

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);
        return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
        struct sg_page_iter sg_iter;

        BUG_ON(obj->userptr.work != NULL);

        if (obj->madv != I915_MADV_WILLNEED)
                obj->dirty = 0;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (obj->dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                page_cache_release(page);
        }
        obj->dirty = 0;

        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 * space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 * that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 * accessible directly by the CPU, but reads and writes by the GPU may
 * incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
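/*
 * A rough sketch of how userspace might exercise this ioctl -- not part of
 * the driver, and assuming the uapi definitions from i915_drm.h plus a
 * libdrm-style drmIoctl() wrapper; use_gem_handle() is a hypothetical helper:
 *
 *      struct drm_i915_gem_userptr arg = { 0 };
 *      void *ptr;
 *
 *      // must be page aligned in both address and size (restriction 1)
 *      posix_memalign(&ptr, 4096, size);
 *
 *      arg.user_ptr = (uintptr_t)ptr;
 *      arg.user_size = size;
 *      arg.flags = 0;
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *              use_gem_handle(arg.handle);
 */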
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

        if (args->user_size > dev_priv->gtt.base.total)
                return -E2BIG;

        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->cache_level = I915_CACHE_LLC;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}

int
i915_gem_init_userptr(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);
        return 0;
}