/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

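/*
 * i915_vma_retire() runs as the retirement callback for the last request
 * that read from this vma on a given engine. It clears that engine's
 * active bit and, once no engine is still using the vma, moves it onto
 * the address space's inactive list (unbinding closed, unpinned vmas).
 * When the last active vma of the object retires, the object is bumped
 * on the bound list and any deferred active reference is released.
 */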
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

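/*
 * __i915_vma_create() allocates the vma, derives its size from the object
 * (or from the partial/rotated view), precomputes the fence size and
 * alignment for GGTT vmas, and links the vma into the object's vma list
 * and rb-tree (sorted by i915_vma_compare() for i915_vma_lookup()).
 */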
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	GEM_BUG_ON(vm->closed);

	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->exec_list);
	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	list_add(&vma->vm_link, &vm->unbound_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (i915_is_ggtt(vm)) {
		GEM_BUG_ON(overflows_type(vma->size, u32));
		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);

	return vma;
}

/**
 * i915_vma_lookup - finds a matching VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_lookup() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma if found, or NULL.
 */
struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_create - creates a VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_create() allocates a new VMA of the @obj in the @vm with
 * @view characteristics.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the newly created vma, or an error pointer.
 */
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(i915_vma_lookup(obj, vm, view));

	return __i915_vma_create(obj, vm, view);
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = i915_vma_lookup(obj, vm, view);
	if (!vma)
		vma = i915_vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_lookup(obj, vm, view) != vma);
	return vma;
}
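
/*
 * A minimal usage sketch (illustrative only, not taken from this file):
 * a caller typically resolves the singleton vma and then pins it into
 * the address space. The GGTT address space and error handling shown
 * here are assumptions about the caller's context.
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	... use vma->node.start ...
 *	i915_vma_unpin(vma);
 */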

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (vma_flags == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		if (ret)
			return ret;
	}

	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

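/*
 * i915_vma_pin_iomap() maps the GGTT range backing a map-and-fenceable
 * vma as write-combined I/O memory and pins the vma for the duration of
 * the mapping. The mapping is cached in vma->iomap and reused on later
 * calls; the caller releases it with i915_vma_unpin_iomap().
 */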
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}

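/*
 * i915_vma_unpin_and_release() zeroes the caller's vma pointer, drops the
 * caller's pin, closes the vma and then releases the reference held on
 * the backing object (unless the object is still active).
 */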
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

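/*
 * i915_vma_misplaced() reports whether the vma's current binding violates
 * the size, alignment or placement constraints requested by the caller,
 * in which case the vma must be unbound and rebound elsewhere.
 */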
bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

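/*
 * __i915_vma_set_map_and_fenceable() recomputes I915_VMA_CAN_FENCE after
 * a GGTT (re)bind: the vma can back a fence register only if its node is
 * large enough and suitably aligned for the fence, and lies entirely
 * within the mappable aperture.
 */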
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = (vma->node.start + vma->fence_size <=
		    i915_vm_to_ggtt(vma->vm)->mappable_end);

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_unpin;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, obj->cache_level,
					   flags);
		if (ret)
			goto err_unpin;
	} else {
		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, obj->cache_level,
					  start, end, flags);
		if (ret)
			goto err_unpin;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

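/*
 * __i915_vma_do_pin() is the slow path behind i915_vma_pin(): with the
 * pin count already raised by the caller, it inserts the node into the
 * address space if the vma is not yet bound, (re)binds the PTEs, and
 * refreshes the map-and-fenceable flag when a GGTT binding appears.
 */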
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

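/*
 * i915_vma_close() marks the vma as closed and detaches it from the
 * object. An idle, unpinned vma is unbound (and so destroyed)
 * immediately; otherwise destruction is deferred until its last
 * activity retires in i915_vma_retire() or the final unbind.
 */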
void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

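/*
 * i915_vma_unbind() evicts the vma from its address space: it waits for
 * outstanding activity, drops any fence register and iomapping, tears
 * down the PTEs and releases the vma's hold on the object's pages. A
 * pinned vma cannot be unbound and -EBUSY is returned instead.
 */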
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}