/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

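/*
 * i915_vma_retire() runs as each tracked read request completes. Once no
 * engine is still reading the vma, it is moved onto the VM's inactive
 * list; a closed, unpinned vma is unbound right away. When the owning
 * object loses its last active vma, its position on the bound list is
 * bumped to keep that list in rough LRU order, and any deferred active
 * reference on the object is finally released.
 */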
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

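/*
 * __i915_vma_create() allocates the vma, derives its size (handling the
 * partial and rotated GGTT view cases), precomputes fence size and
 * alignment for GGTT vmas, and links the vma into the object's rbtree of
 * vmas, ordered by i915_vma_compare().
 */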
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	GEM_BUG_ON(vm->closed);

	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->exec_list);
	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	list_add(&vma->vm_link, &vm->unbound_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = 4096;

	if (view) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->params.partial.offset,
						     view->params.partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->params.partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size =
				intel_rotation_info_size(&view->params.rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (i915_is_ggtt(vm)) {
		GEM_BUG_ON(overflows_type(vma->size, u32));
		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		GEM_BUG_ON(vma->fence_size & 4095);

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);

	return vma;
}

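/**
 * i915_vma_create - create a VMA for an object in an address space
 * @obj: object to be mapped
 * @vm: address space in which to map the object
 * @view: GGTT view for the mapping, or NULL for the default view
 *
 * Must be called with struct_mutex held and only when no matching VMA
 * exists yet; non-default views are only valid in the GGTT.
 */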
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));

	return __i915_vma_create(obj, vm, view);
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (vma_flags == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		if (ret)
			return ret;
	}

	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

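/**
 * i915_vma_pin_iomap - pin the VMA and map it into the CPU's address space
 * @vma: VMA to map; must be a map-and-fenceable, globally bound GGTT VMA
 *
 * Returns a write-combining iomap of the VMA's range in the mappable
 * aperture, creating and caching the mapping on first use. Requires the
 * device to be awake and struct_mutex to be held; the VMA is left pinned
 * and the caller must unpin it once done with the mapping.
 *
 * Hypothetical usage sketch (error handling abbreviated; assumes the
 * matching i915_vma_unpin_iomap() helper from i915_vma.h):
 *
 *	void __iomem *ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 */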
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}

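/*
 * i915_vma_unpin_and_release() drops one pin on *p_vma, closes the vma and
 * clears the caller's pointer; the object itself is released unless it is
 * still active, in which case its release is deferred until it retires.
 */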
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

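/*
 * i915_vma_misplaced() reports whether the vma's current placement
 * violates the size, alignment or PIN_* placement constraints requested,
 * i.e. whether the vma would have to be rebound to satisfy them.
 */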
bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

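/*
 * __i915_vma_set_map_and_fenceable() recomputes I915_VMA_CAN_FENCE for a
 * GGTT vma: a fence register can only be used if the node is at least
 * fence_size bytes, aligned to fence_alignment, and lies entirely inside
 * the mappable aperture.
 */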
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     (vma->node.start & (vma->fence_alignment - 1)) == 0);

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

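/*
 * i915_gem_valid_gtt_space() verifies that, when the VM uses cache-level
 * colouring, the vma's node is separated by at least a hole from any
 * neighbouring node of a different colour, on both sides.
 */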
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (offset & (alignment - 1) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_unpin;
		}

		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma, flags);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
			if (ret)
				goto err_unpin;
		}
	} else {
		u32 search_flag, alloc_flag;

		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(vma->vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_unpin;
		}

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

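/*
 * __i915_vma_do_pin() is the slow path behind i915_vma_pin(): the caller
 * has already raised the pin count (hence the bare __i915_vma_unpin() on
 * the error path), so here we only need to insert the node on first bind,
 * set up the PTEs via i915_vma_bind(), and refresh the map-and-fenceable
 * state whenever a new global binding appears.
 *
 * A sketch of a typical call, assuming the i915_vma_pin() wrapper:
 *
 *	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 */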
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}

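/*
 * i915_vma_destroy() frees an idle, already-closed and unbound vma,
 * dropping the ppGTT reference held by non-GGTT vmas.
 */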
void i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

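/*
 * i915_vma_close() unlinks the vma from its object and marks it closed.
 * An idle, unpinned vma is unbound immediately; a busy or pinned one is
 * unbound later, when it retires (see i915_vma_retire()).
 */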
void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

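/*
 * i915_vma_unbind() waits for any outstanding GPU activity on the vma and
 * then tears down the binding: the fence and CPU iomap are released for
 * map-and-fenceable vmas, the PTEs are cleared, the drm_mm node is removed
 * and, once the object's last vma is unbound, the object's hold on its
 * backing pages is dropped. Returns -EBUSY if the vma is still pinned.
 */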
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}