/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

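/*
 * One i915_mm_struct is created per userspace mm_struct and shared, via
 * its kref, by every userptr object created by that process. It keeps a
 * reference on the mm and owns the single i915_mmu_notifier registered
 * against it; the work item defers the final free until the struct_mutex
 * has been dropped, see __i915_mm_struct_free__worker() below.
 */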
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

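/*
 * One i915_mmu_notifier is registered per mm. It tracks, in an interval
 * tree keyed by userptr range, every i915_mmu_object (one per userptr
 * buffer) belonging to that mm: the spinlock guards the tree, and the
 * workqueue runs the cancel_userptr() work items queued by the
 * invalidation callback.
 */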
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct workqueue_struct *wq;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

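/*
 * Wait for outstanding rendering on the object to complete. Each pending
 * request is referenced so that the struct_mutex can be dropped while
 * waiting and reacquired afterwards.
 */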
static void wait_rendering(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	int i, n;

	if (!obj->active)
		return;

	n = 0;
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct drm_i915_gem_request *req;

		req = obj->last_read_req[i];
		if (req == NULL)
			continue;

		requests[n++] = i915_gem_request_reference(req);
	}

	mutex_unlock(&dev->struct_mutex);

	for (i = 0; i < n; i++)
		__i915_wait_request(requests[i], false, NULL, NULL);

	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++)
		i915_gem_request_unreference(requests[i]);
}

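/*
 * Worker run on mn->wq when the pages backing an object are invalidated:
 * it unbinds every vma and releases the object's pages so the kernel can
 * reclaim them, then drops the extra reference taken in
 * i915_gem_userptr_mn_invalidate_range_start().
 */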
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		wait_rendering(obj);

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
			WARN_ON(i915_vma_unbind(vma));
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
}

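/*
 * add_object() and del_object() are called with mn->lock held; the
 * attached flag keeps insertion into and removal from the interval tree
 * idempotent.
 */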
static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}

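/*
 * Invalidation callback: for every object overlapping [start, end) we
 * queue a cancel_userptr() work item (taking a reference unless the
 * object is already being destroyed), detach it from the interval tree,
 * and finally flush the workqueue so that all pages are released before
 * the range is returned to the core mm.
 */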
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		destroy_workqueue(mn->wq);
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

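/*
 * Find or lazily create the notifier for this mm. Note the lock ordering:
 * registration takes the mmap_sem (write) before our mm_lock, and mm->mn
 * is re-checked under the lock as we may race with a concurrent creator.
 */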
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

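/*
 * Without CONFIG_MMU_NOTIFIER we have no way of revoking the pages when
 * the mapping changes, so only the I915_USERPTR_UNSYNCHRONIZED mode is
 * available, and only to CAP_SYS_ADMIN.
 */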
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

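/*
 * Context handed to __i915_gem_userptr_get_pages_worker(): a reference
 * on the object plus the task whose address space the pages are pinned
 * from.
 */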
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

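/*
 * Build the sg_table for the pinned pages. With swiotlb active we use one
 * sg entry per page instead of letting sg_alloc_table_from_pages()
 * coalesce contiguous pages, presumably to keep each segment within reach
 * of the bounce buffers.
 */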
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

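/*
 * The slow path: pin the pages via get_user_pages_remote() from a worker.
 * mm_users is raised to keep the mm alive across the gup, and the result
 * is only installed if obj->userptr.work still points at this work item,
 * i.e. it has not been cancelled or superseded in the meantime.
 */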
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		ret = -EFAULT;
		if (atomic_inc_not_zero(&mm->mm_users)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 !obj->userptr.read_only, 0,
					 pvec + pinned, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
		if (ret)
			__i915_gem_userptr_set_active(obj, false);
	}

	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = obj;
	drm_gem_object_reference(&obj->base);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding these pages into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */
	if (IS_ERR(obj->userptr.work)) {
		/* active flag will have been dropped already by the worker */
		ret = PTR_ERR(obj->userptr.work);
		obj->userptr.work = NULL;
		return ret;
	}
	if (obj->userptr.work)
		/* active flag should still be held for the pending work */
		return -EAGAIN;

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
				      GFP_TEMPORARY);
		if (pvec == NULL) {
			__i915_gem_userptr_set_active(obj, false);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and with broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
}